Searched refs:async (Results 1 - 200 of 725) sorted by relevance

/linux-4.1.27/drivers/staging/comedi/
comedi_buf.c
64 struct comedi_async *async = s->async; __comedi_buf_free() local
68 if (async->prealloc_buf) { __comedi_buf_free()
69 vunmap(async->prealloc_buf); __comedi_buf_free()
70 async->prealloc_buf = NULL; __comedi_buf_free()
71 async->prealloc_bufsz = 0; __comedi_buf_free()
75 bm = async->buf_map; __comedi_buf_free()
76 async->buf_map = NULL; __comedi_buf_free()
85 struct comedi_async *async = s->async; __comedi_buf_alloc() local
98 bm = kzalloc(sizeof(*async->buf_map), GFP_KERNEL); __comedi_buf_alloc()
104 async->buf_map = bm; __comedi_buf_alloc()
145 async->prealloc_buf = vmap(pages, n_pages, VM_MAP, __comedi_buf_alloc()
164 /* returns s->async->buf_map and increments its kref refcount */
168 struct comedi_async *async = s->async; comedi_buf_map_from_subdev_get() local
172 if (!async) comedi_buf_map_from_subdev_get()
176 bm = async->buf_map; comedi_buf_map_from_subdev_get()
189 struct comedi_buf_map *bm = s->async->buf_map; comedi_buf_is_mmapped()
197 struct comedi_async *async = s->async; comedi_buf_alloc() local
203 if (async->prealloc_buf && async->prealloc_bufsz == new_size) comedi_buf_alloc()
215 if (!async->prealloc_buf) { comedi_buf_alloc()
221 async->prealloc_bufsz = new_size; comedi_buf_alloc()
228 struct comedi_async *async = s->async; comedi_buf_reset() local
230 async->buf_write_alloc_count = 0; comedi_buf_reset()
231 async->buf_write_count = 0; comedi_buf_reset()
232 async->buf_read_alloc_count = 0; comedi_buf_reset()
233 async->buf_read_count = 0; comedi_buf_reset()
235 async->buf_write_ptr = 0; comedi_buf_reset()
236 async->buf_read_ptr = 0; comedi_buf_reset()
238 async->cur_chan = 0; comedi_buf_reset()
239 async->scans_done = 0; comedi_buf_reset()
240 async->scan_progress = 0; comedi_buf_reset()
241 async->munge_chan = 0; comedi_buf_reset()
242 async->munge_count = 0; comedi_buf_reset()
243 async->munge_ptr = 0; comedi_buf_reset()
245 async->events = 0; comedi_buf_reset()
250 struct comedi_async *async = s->async; comedi_buf_write_n_available() local
251 unsigned int free_end = async->buf_read_count + async->prealloc_bufsz; comedi_buf_write_n_available()
253 return free_end - async->buf_write_alloc_count; comedi_buf_write_n_available()
260 struct comedi_async *async = s->async; comedi_buf_write_alloc() local
266 async->buf_write_alloc_count += nbytes; comedi_buf_write_alloc()
269 * ensure the async buffer 'counts' are read and updated comedi_buf_write_alloc()
285 struct comedi_async *async = s->async; comedi_buf_munge() local
289 if (!s->munge || (async->cmd.flags & CMDF_RAWDATA)) { comedi_buf_munge()
290 async->munge_count += num_bytes; comedi_buf_munge()
299 buf_end = async->prealloc_bufsz - async->munge_ptr; comedi_buf_munge()
304 async->prealloc_buf + async->munge_ptr, comedi_buf_munge()
305 block_size, async->munge_chan); comedi_buf_munge()
309 * async buffer munge_count is incremented comedi_buf_munge()
313 async->munge_chan += block_size / num_sample_bytes; comedi_buf_munge()
314 async->munge_chan %= async->cmd.chanlist_len; comedi_buf_munge()
315 async->munge_count += block_size; comedi_buf_munge()
316 async->munge_ptr += block_size; comedi_buf_munge()
317 async->munge_ptr %= async->prealloc_bufsz; comedi_buf_munge()
327 struct comedi_async *async = s->async; comedi_buf_write_n_allocated() local
329 return async->buf_write_alloc_count - async->buf_write_count; comedi_buf_write_n_allocated()
336 struct comedi_async *async = s->async; comedi_buf_write_free() local
342 async->buf_write_count += nbytes; comedi_buf_write_free()
343 async->buf_write_ptr += nbytes; comedi_buf_write_free()
344 comedi_buf_munge(s, async->buf_write_count - async->munge_count); comedi_buf_write_free()
345 if (async->buf_write_ptr >= async->prealloc_bufsz) comedi_buf_write_free()
346 async->buf_write_ptr %= async->prealloc_bufsz; comedi_buf_write_free()
354 struct comedi_async *async = s->async; comedi_buf_read_n_available() local
357 if (!async) comedi_buf_read_n_available()
360 num_bytes = async->munge_count - async->buf_read_count; comedi_buf_read_n_available()
363 * ensure the async buffer 'counts' are read before we comedi_buf_read_n_available()
376 struct comedi_async *async = s->async; comedi_buf_read_alloc() local
379 available = async->munge_count - async->buf_read_alloc_count; comedi_buf_read_alloc()
383 async->buf_read_alloc_count += nbytes; comedi_buf_read_alloc()
386 * ensure the async buffer 'counts' are read before we comedi_buf_read_alloc()
395 static unsigned int comedi_buf_read_n_allocated(struct comedi_async *async) comedi_buf_read_n_allocated() argument
397 return async->buf_read_alloc_count - async->buf_read_count; comedi_buf_read_n_allocated()
404 struct comedi_async *async = s->async; comedi_buf_read_free() local
409 * the async read count is incremented comedi_buf_read_free()
413 allocated = comedi_buf_read_n_allocated(async); comedi_buf_read_free()
417 async->buf_read_count += nbytes; comedi_buf_read_free()
418 async->buf_read_ptr += nbytes; comedi_buf_read_free()
419 async->buf_read_ptr %= async->prealloc_bufsz; comedi_buf_read_free()
427 struct comedi_async *async = s->async; comedi_buf_memcpy_to() local
428 unsigned int write_ptr = async->buf_write_ptr; comedi_buf_memcpy_to()
433 if (write_ptr + num_bytes > async->prealloc_bufsz) comedi_buf_memcpy_to()
434 block_size = async->prealloc_bufsz - write_ptr; comedi_buf_memcpy_to()
438 memcpy(async->prealloc_buf + write_ptr, data, block_size); comedi_buf_memcpy_to()
451 struct comedi_async *async = s->async; comedi_buf_memcpy_from() local
452 unsigned int read_ptr = async->buf_read_ptr; comedi_buf_memcpy_from()
457 src = async->prealloc_buf + read_ptr; comedi_buf_memcpy_from()
459 if (nbytes >= async->prealloc_bufsz - read_ptr) comedi_buf_memcpy_from()
460 block_size = async->prealloc_bufsz - read_ptr; comedi_buf_memcpy_from()
497 s->async->events |= COMEDI_CB_OVERFLOW; comedi_buf_write_samples()
509 s->async->events |= COMEDI_CB_BLOCK; comedi_buf_write_samples()
546 s->async->events |= COMEDI_CB_BLOCK; comedi_buf_read_samples()
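
The comedi_buf.c hits above all revolve around one ring-buffer discipline: four monotonically increasing byte counters (buf_write_alloc_count, buf_write_count, buf_read_alloc_count, buf_read_count), with the read/write pointers reduced modulo prealloc_bufsz. Below is a minimal standalone C sketch of that counter arithmetic, with simplified names and the munge stage omitted; it illustrates the pattern and is not the kernel API.

/* Four-counter ring buffer in the style of comedi_buf.c. Counters only
 * grow; unsigned wraparound keeps the differences correct.
 */
#include <stdio.h>

struct ring {
	unsigned int bufsz;        /* cf. prealloc_bufsz */
	unsigned int write_alloc;  /* cf. buf_write_alloc_count */
	unsigned int write_count;  /* cf. buf_write_count */
	unsigned int read_alloc;   /* cf. buf_read_alloc_count */
	unsigned int read_count;   /* cf. buf_read_count */
};

/* bytes a writer may still reserve (cf. comedi_buf_write_n_available) */
static unsigned int write_n_available(const struct ring *r)
{
	return r->read_count + r->bufsz - r->write_alloc;
}

/* reserve space for writing (cf. comedi_buf_write_alloc) */
static unsigned int write_alloc(struct ring *r, unsigned int n)
{
	unsigned int avail = write_n_available(r);

	if (n > avail)
		n = avail;
	r->write_alloc += n;
	return n;
}

/* commit reserved bytes so readers can see them (cf. comedi_buf_write_free) */
static void write_free(struct ring *r, unsigned int n)
{
	r->write_count += n;
}

/* bytes ready for a reader; the real code compares against munge_count */
static unsigned int read_n_available(const struct ring *r)
{
	return r->write_count - r->read_count;
}

int main(void)
{
	struct ring r = { .bufsz = 64 };

	write_free(&r, write_alloc(&r, 16));
	printf("readable: %u\n", read_n_available(&r)); /* prints 16 */
	return 0;
}
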
comedi_fops.c
333 struct comedi_async *async = s->async; resize_async_buffer() local
336 if (new_size > async->max_bufsize) resize_async_buffer()
364 s->index, async->prealloc_bufsz); resize_async_buffer()
384 if (s && (s->subdev_flags & SDF_CMD_READ) && s->async) max_read_buffer_kb_show()
385 size = s->async->max_bufsize / 1024; max_read_buffer_kb_show()
415 if (s && (s->subdev_flags & SDF_CMD_READ) && s->async) max_read_buffer_kb_store()
416 s->async->max_bufsize = size; max_read_buffer_kb_store()
440 if (s && (s->subdev_flags & SDF_CMD_READ) && s->async) read_buffer_kb_show()
441 size = s->async->prealloc_bufsz / 1024; read_buffer_kb_show()
471 if (s && (s->subdev_flags & SDF_CMD_READ) && s->async) read_buffer_kb_store()
497 if (s && (s->subdev_flags & SDF_CMD_WRITE) && s->async) max_write_buffer_kb_show()
498 size = s->async->max_bufsize / 1024; max_write_buffer_kb_show()
528 if (s && (s->subdev_flags & SDF_CMD_WRITE) && s->async) max_write_buffer_kb_store()
529 s->async->max_bufsize = size; max_write_buffer_kb_store()
553 if (s && (s->subdev_flags & SDF_CMD_WRITE) && s->async) write_buffer_kb_show()
554 size = s->async->prealloc_bufsz / 1024; write_buffer_kb_show()
584 if (s && (s->subdev_flags & SDF_CMD_WRITE) && s->async) write_buffer_kb_store()
654 * comedi_is_subdevice_running - check if async command running on subdevice
705 struct comedi_async *async = s->async; do_become_nonbusy() local
708 if (async) { do_become_nonbusy()
710 async->inttrig = NULL; do_become_nonbusy()
711 kfree(async->cmd.chanlist); do_become_nonbusy()
712 async->cmd.chanlist = NULL; do_become_nonbusy()
714 wake_up_interruptible_all(&async->wait_head); do_become_nonbusy()
717 "BUG: (?) do_become_nonbusy called with async=NULL\n"); do_become_nonbusy()
744 if (s->async) comedi_device_cancel_all()
761 if (s->async && comedi_buf_is_mmapped(s)) is_device_busy()
837 struct comedi_async *async; do_bufconfig_ioctl() local
848 async = s->async; do_bufconfig_ioctl()
850 if (!async) { do_bufconfig_ioctl()
852 "subdevice does not have async capability\n"); do_bufconfig_ioctl()
862 async->max_bufsize = bc.maximum_size; do_bufconfig_ioctl()
871 bc.size = async->prealloc_bufsz; do_bufconfig_ioctl()
872 bc.maximum_size = async->max_bufsize; do_bufconfig_ioctl()
1079 struct comedi_async *async; do_bufinfo_ioctl() local
1089 async = s->async; do_bufinfo_ioctl()
1091 if (!async) { do_bufinfo_ioctl()
1093 "subdevice does not have async capability\n"); do_bufinfo_ioctl()
1110 if (bi.bytes_read && !(async->cmd.flags & CMDF_WRITE)) { do_bufinfo_ioctl()
1120 if (bi.bytes_written && (async->cmd.flags & CMDF_WRITE)) { do_bufinfo_ioctl()
1127 bi.buf_write_count = async->buf_write_count; do_bufinfo_ioctl()
1128 bi.buf_write_ptr = async->buf_write_ptr; do_bufinfo_ioctl()
1129 bi.buf_read_count = async->buf_read_count; do_bufinfo_ioctl()
1130 bi.buf_read_ptr = async->buf_read_ptr; do_bufinfo_ioctl()
1253 if (!s->async) { parse_insn()
1254 dev_dbg(dev->class_dev, "no async\n"); parse_insn()
1258 if (!s->async->inttrig) { parse_insn()
1263 ret = s->async->inttrig(dev, s, data[0]); parse_insn()
1560 if (!s->do_cmd || !s->do_cmdtest || !s->async) { __comedi_get_user_cmd()
1637 struct comedi_async *async; do_cmd_ioctl() local
1650 async = s->async; do_cmd_ioctl()
1671 async->cmd = cmd; do_cmd_ioctl()
1672 async->cmd.data = NULL; do_cmd_ioctl()
1675 ret = __comedi_get_user_chanlist(dev, s, user_chanlist, &async->cmd); do_cmd_ioctl()
1679 ret = s->do_cmdtest(dev, s, &async->cmd); do_cmd_ioctl()
1681 if (async->cmd.flags & CMDF_BOGUS || ret) { do_cmd_ioctl()
1683 cmd = async->cmd; do_cmd_ioctl()
1696 if (!async->prealloc_bufsz) { do_cmd_ioctl()
1704 async->cb_mask = COMEDI_CB_BLOCK | COMEDI_CB_CANCEL_MASK; do_cmd_ioctl()
1705 if (async->cmd.flags & CMDF_WAKE_EOS) do_cmd_ioctl()
1706 async->cb_mask |= COMEDI_CB_EOS; do_cmd_ioctl()
1870 if (!s->async) do_cancel_ioctl()
1950 if (s_old && s_old->busy == file && s_old->async && do_setrsubd_ioctl()
1951 !(s_old->async->cmd.flags & CMDF_WRITE)) do_setrsubd_ioctl()
1992 if (s_old && s_old->busy == file && s_old->async && do_setwsubd_ioctl()
1993 (s_old->async->cmd.flags & CMDF_WRITE)) do_setwsubd_ioctl()
2140 struct comedi_async *async; comedi_mmap() local
2172 async = s->async; comedi_mmap()
2173 if (!async) { comedi_mmap()
2185 if (size > async->prealloc_bufsz) { comedi_mmap()
2241 if (s && s->async) { comedi_poll()
2242 poll_wait(file, &s->async->wait_head, wait); comedi_poll()
2244 (s->async->cmd.flags & CMDF_WRITE) || comedi_poll()
2250 if (s && s->async) { comedi_poll()
2253 poll_wait(file, &s->async->wait_head, wait); comedi_poll()
2254 comedi_buf_write_alloc(s, s->async->prealloc_bufsz); comedi_poll()
2256 !(s->async->cmd.flags & CMDF_WRITE) || comedi_poll()
2270 struct comedi_async *async; comedi_write() local
2291 if (!s || !s->async) { comedi_write()
2296 async = s->async; comedi_write()
2304 if (!(async->cmd.flags & CMDF_WRITE)) { comedi_write()
2309 add_wait_queue(&async->wait_head, &wait); comedi_write()
2328 * remove task from the async wait queue before comedi_write()
2332 remove_wait_queue(&async->wait_head, &wait); comedi_write()
2348 s == new_s && new_s->async == async) comedi_write()
2358 if (async->buf_write_ptr + m > async->prealloc_bufsz) comedi_write()
2359 m = async->prealloc_bufsz - async->buf_write_ptr; comedi_write()
2360 comedi_buf_write_alloc(s, async->prealloc_bufsz); comedi_write()
2382 if (!(async->cmd.flags & CMDF_WRITE)) { comedi_write()
2389 m = copy_from_user(async->prealloc_buf + async->buf_write_ptr, comedi_write()
2405 remove_wait_queue(&async->wait_head, &wait); comedi_write()
2417 struct comedi_async *async; comedi_read() local
2438 if (!s || !s->async) { comedi_read()
2443 async = s->async; comedi_read()
2450 if (async->cmd.flags & CMDF_WRITE) { comedi_read()
2455 add_wait_queue(&async->wait_head, &wait); comedi_read()
2462 if (async->buf_read_ptr + m > async->prealloc_bufsz) comedi_read()
2463 m = async->prealloc_bufsz - async->buf_read_ptr; comedi_read()
2495 if (async->cmd.flags & CMDF_WRITE) { comedi_read()
2501 m = copy_to_user(buf, async->prealloc_buf + comedi_read()
2502 async->buf_read_ptr, n); comedi_read()
2517 remove_wait_queue(&async->wait_head, &wait); comedi_read()
2538 s == new_s && new_s->async == async) { comedi_read()
2672 struct comedi_async *async = s->async; comedi_event() local
2679 events = async->events; comedi_event()
2680 async->events = 0; comedi_event()
2696 if (async->cb_mask & events) { comedi_event()
2697 wake_up_interruptible(&async->wait_head); comedi_event()
2698 si_code = async->cmd.flags & CMDF_WRITE ? POLL_OUT : POLL_IN; comedi_event()
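
comedi_event(), at the end of the comedi_fops.c hits, shows the wakeup side of the same buffer: pending event bits are latched and cleared, and sleepers are woken only if an event in async->cb_mask fired. A hedged sketch of that mask-then-wake shape, with illustrative event bits rather than the real COMEDI_CB_* set:

#include <linux/wait.h>

#define MY_CB_BLOCK	(1 << 0)	/* illustrative event bits */
#define MY_CB_EOA	(1 << 1)

struct my_async {
	wait_queue_head_t wait_head;
	unsigned int events;	/* set by the interrupt/completion path */
	unsigned int cb_mask;	/* events that should wake readers/writers */
};

/* Latch-and-clear the pending events, then wake sleepers only when a
 * masked event fired (cf. comedi_event()).
 */
static void my_event(struct my_async *a)
{
	unsigned int events = a->events;

	a->events = 0;
	if (a->cb_mask & events)
		wake_up_interruptible(&a->wait_head);
}
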
drivers.c
131 if (s->async) { comedi_device_detach_cleanup()
133 kfree(s->async); comedi_device_detach_cleanup()
317 struct comedi_cmd *cmd = &s->async->cmd; comedi_bytes_per_scan()
341 * If nscans is 0, the number of scans available in the async buffer will be
344 * If the async command has a stop_src of TRIG_COUNT, the nscans will be
353 struct comedi_async *async = s->async; comedi_nscans_left() local
354 struct comedi_cmd *cmd = &async->cmd; comedi_nscans_left()
365 if (async->scans_done < cmd->stop_arg) comedi_nscans_left()
366 scans_left = cmd->stop_arg - async->scans_done; comedi_nscans_left()
386 struct comedi_async *async = s->async; comedi_nsamples_left() local
387 struct comedi_cmd *cmd = &async->cmd; comedi_nsamples_left()
394 comedi_bytes_to_samples(s, async->scan_progress); comedi_nsamples_left()
422 struct comedi_async *async = s->async; comedi_inc_scan_progress() local
423 struct comedi_cmd *cmd = &async->cmd; comedi_inc_scan_progress()
428 async->cur_chan += comedi_bytes_to_samples(s, num_bytes); comedi_inc_scan_progress()
429 async->cur_chan %= cmd->chanlist_len; comedi_inc_scan_progress()
432 async->scan_progress += num_bytes; comedi_inc_scan_progress()
433 if (async->scan_progress >= scan_length) { comedi_inc_scan_progress()
434 unsigned int nscans = async->scan_progress / scan_length; comedi_inc_scan_progress()
436 if (async->scans_done < (UINT_MAX - nscans)) comedi_inc_scan_progress()
437 async->scans_done += nscans; comedi_inc_scan_progress()
439 async->scans_done = UINT_MAX; comedi_inc_scan_progress()
441 async->scan_progress %= scan_length; comedi_inc_scan_progress()
442 async->events |= COMEDI_CB_EOS; comedi_inc_scan_progress()
466 unsigned int events = s->async->events; comedi_handle_events()
521 struct comedi_async *async; __comedi_device_postconfig_async() local
527 "async subdevices must support SDF_CMD_READ or SDF_CMD_WRITE\n"); __comedi_device_postconfig_async()
532 "async subdevices must have a do_cmdtest() function\n"); __comedi_device_postconfig_async()
536 async = kzalloc(sizeof(*async), GFP_KERNEL); __comedi_device_postconfig_async()
537 if (!async) __comedi_device_postconfig_async()
540 init_waitqueue_head(&async->wait_head); __comedi_device_postconfig_async()
541 s->async = async; __comedi_device_postconfig_async()
543 async->max_bufsize = comedi_default_buf_maxsize_kb * 1024; __comedi_device_postconfig_async()
545 if (buf_size > async->max_bufsize) __comedi_device_postconfig_async()
546 buf_size = async->max_bufsize; __comedi_device_postconfig_async()
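
The drivers.c hits include comedi_inc_scan_progress(), where per-byte progress is folded back modulo the scan length and completed scans are counted with saturation. A standalone sketch of just that arithmetic (simplified; not the kernel function):

#include <limits.h>

struct progress {
	unsigned int scan_length;	/* bytes per scan */
	unsigned int scan_progress;	/* bytes into the current scan */
	unsigned int scans_done;
};

/* Returns nonzero at end of scan, where the caller would raise
 * COMEDI_CB_EOS (cf. comedi_inc_scan_progress()).
 */
static int advance(struct progress *p, unsigned int num_bytes)
{
	unsigned int nscans;

	p->scan_progress += num_bytes;
	if (p->scan_progress < p->scan_length)
		return 0;

	nscans = p->scan_progress / p->scan_length;
	if (p->scans_done < UINT_MAX - nscans)
		p->scans_done += nscans;
	else
		p->scans_done = UINT_MAX;	/* saturate, as the kernel does */

	p->scan_progress %= p->scan_length;
	return 1;
}
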
comedidev.h
47 struct comedi_async *async; member in struct:comedi_subdevice
228 * @COMEDI_CB_CANCEL_MASK: events that will cancel an async command
265 /* hw_dev is passed to dma_alloc_coherent when allocating async buffers
555 return s->async->buf_write_count - s->async->buf_read_count; comedi_buf_n_bytes_ready()
/linux-4.1.27/drivers/base/regmap/
regmap-spi.c
27 struct regmap_async_spi *async = data; regmap_spi_complete() local
29 regmap_async_complete_cb(&async->core, async->m.status); regmap_spi_complete()
62 struct regmap_async_spi *async = container_of(a, regmap_spi_async_write() local
68 async->t[0].tx_buf = reg; regmap_spi_async_write()
69 async->t[0].len = reg_len; regmap_spi_async_write()
70 async->t[1].tx_buf = val; regmap_spi_async_write()
71 async->t[1].len = val_len; regmap_spi_async_write()
73 spi_message_init(&async->m); regmap_spi_async_write()
74 spi_message_add_tail(&async->t[0], &async->m); regmap_spi_async_write()
76 spi_message_add_tail(&async->t[1], &async->m); regmap_spi_async_write()
78 async->m.complete = regmap_spi_complete; regmap_spi_async_write()
79 async->m.context = async; regmap_spi_async_write()
81 return spi_async(spi, &async->m); regmap_spi_async_write()
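
regmap-spi.c's async path is ordinary spi_async() plumbing: two transfers chained into one spi_message with a completion callback that reports m.status. A hedged sketch of the same pattern for a hypothetical driver; my_done, my_async_write, and the my_async container (assumed zero-initialized by its allocator) are illustrative names, not regmap code:

#include <linux/spi/spi.h>
#include <linux/printk.h>

struct my_async {
	struct spi_message m;
	struct spi_transfer t[2];	/* must start zeroed */
};

static void my_done(void *context)
{
	struct my_async *a = context;

	/* runs in completion context once the controller finishes */
	pr_debug("async spi write status %d\n", a->m.status);
}

static int my_async_write(struct spi_device *spi, struct my_async *a,
			  const void *reg, size_t reg_len,
			  const void *val, size_t val_len)
{
	a->t[0].tx_buf = reg;
	a->t[0].len = reg_len;
	a->t[1].tx_buf = val;
	a->t[1].len = val_len;

	spi_message_init(&a->m);
	spi_message_add_tail(&a->t[0], &a->m);
	if (val_len)
		spi_message_add_tail(&a->t[1], &a->m);

	a->m.complete = my_done;
	a->m.context = a;
	return spi_async(spi, &a->m);	/* queues and returns immediately */
}
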
regmap.c
1075 struct regmap_async *async; regmap_exit() local
1084 async = list_first_entry_or_null(&map->async_free, regmap_exit()
1087 list_del(&async->list); regmap_exit()
1088 kfree(async->work_buf); regmap_exit()
1089 kfree(async); regmap_exit()
1280 if (map->async && map->bus->async_write) { _regmap_raw_write()
1281 struct regmap_async *async; _regmap_raw_write() local
1286 async = list_first_entry_or_null(&map->async_free, _regmap_raw_write()
1289 if (async) _regmap_raw_write()
1290 list_del(&async->list); _regmap_raw_write()
1293 if (!async) { _regmap_raw_write()
1294 async = map->bus->async_alloc(); _regmap_raw_write()
1295 if (!async) _regmap_raw_write()
1298 async->work_buf = kzalloc(map->format.buf_size, _regmap_raw_write()
1300 if (!async->work_buf) { _regmap_raw_write()
1301 kfree(async); _regmap_raw_write()
1306 async->map = map; _regmap_raw_write()
1309 memcpy(async->work_buf, map->work_buf, map->format.pad_bytes + _regmap_raw_write()
1313 list_add_tail(&async->list, &map->async_list); _regmap_raw_write()
1318 async->work_buf, _regmap_raw_write()
1321 val, val_len, async); _regmap_raw_write()
1324 async->work_buf, _regmap_raw_write()
1327 val_len, NULL, 0, async); _regmap_raw_write()
1334 list_move(&async->list, &map->async_free); _regmap_raw_write()
1521 map->async = true; regmap_write_async()
1525 map->async = false; regmap_write_async()
2020 map->async = true; regmap_raw_write_async()
2024 map->async = false; regmap_raw_write_async()
2399 map->async = true; regmap_update_bits_async()
2403 map->async = false; regmap_update_bits_async()
2461 map->async = true; regmap_update_bits_check_async()
2465 map->async = false; regmap_update_bits_check_async()
2473 void regmap_async_complete_cb(struct regmap_async *async, int ret) regmap_async_complete_cb() argument
2475 struct regmap *map = async->map; regmap_async_complete_cb()
2481 list_move(&async->list, &map->async_free); regmap_async_complete_cb()
2519 /* Nothing to do with no async support */ regmap_async_complete()
2582 map->async = true; regmap_register_patch()
2589 map->async = false; regmap_register_patch()
internal.h
70 bool async; member in struct:regmap
236 void regmap_async_complete_cb(struct regmap_async *async, int ret);
regcache.c
319 map->async = true; regcache_sync()
343 map->async = false; regcache_sync()
389 map->async = true; regcache_sync_region()
399 map->async = false; regcache_sync_region()
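
Taken together, the regmap.c and regcache.c hits show one bracketing idiom: map->async is raised, writes are queued (reusing buffers from the async_free list), and completion is awaited before the flag drops. From a driver's point of view that is just the public async API; a hedged usage sketch with made-up register offsets:

#include <linux/regmap.h>

/* Queue several register writes without blocking on each, then wait
 * once for the whole batch. Registers and values are illustrative.
 */
static int my_fast_init(struct regmap *map)
{
	int ret;

	ret = regmap_write_async(map, 0x00, 0x01);
	if (ret)
		return ret;
	ret = regmap_write_async(map, 0x04, 0xff);
	if (ret)
		return ret;

	/* blocks until every queued async write has completed */
	return regmap_async_complete(map);
}
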
/linux-4.1.27/include/crypto/
ablk_helper.h
2 * Shared async block cipher helpers
cryptd.h
2 * Software async crypto daemon
mcryptd.h
2 * Software async multibuffer crypto daemon headers
/linux-4.1.27/drivers/staging/comedi/drivers/
ni_tiocmd.c
91 struct comedi_cmd *cmd = &s->async->cmd; ni_tio_input_inttrig()
107 s->async->inttrig = NULL; ni_tio_input_inttrig()
117 struct comedi_async *async = s->async; ni_tio_input_cmd() local
118 struct comedi_cmd *cmd = &async->cmd; ni_tio_input_cmd()
122 comedi_buf_write_alloc(s, async->prealloc_bufsz); ni_tio_input_cmd()
140 async->inttrig = &ni_tio_input_inttrig; ni_tio_input_cmd()
142 async->inttrig = NULL; ni_tio_input_cmd()
170 struct comedi_cmd *cmd = &s->async->cmd; ni_tio_cmd_setup()
197 struct comedi_async *async = s->async; ni_tio_cmd() local
198 struct comedi_cmd *cmd = &async->cmd; ni_tio_cmd()
428 s->async->events |= COMEDI_CB_OVERFLOW; ni_tio_handle_interrupt()
431 s->async->events |= COMEDI_CB_ERROR; ni_tio_handle_interrupt()
439 s->async->events |= COMEDI_CB_OVERFLOW; ni_tio_handle_interrupt()
mite.c
298 struct comedi_async *async = s->async; mite_buf_change() local
313 if (async->prealloc_bufsz == 0) mite_buf_change()
316 n_links = async->prealloc_bufsz >> PAGE_SHIFT; mite_buf_change()
332 cpu_to_le32(async->buf_map->page_list[i].dma_addr); mite_buf_change()
514 struct comedi_async *async = s->async; mite_sync_input_dma() local
518 old_alloc_count = async->buf_write_alloc_count; mite_sync_input_dma()
520 comedi_buf_write_alloc(s, async->prealloc_bufsz); mite_sync_input_dma()
527 async->events |= COMEDI_CB_OVERFLOW; mite_sync_input_dma()
531 count = nbytes - async->buf_write_count; mite_sync_input_dma()
539 async->events |= COMEDI_CB_BLOCK; mite_sync_input_dma()
547 struct comedi_async *async = s->async; mite_sync_output_dma() local
548 struct comedi_cmd *cmd = &async->cmd; mite_sync_output_dma()
550 unsigned int old_alloc_count = async->buf_read_alloc_count; mite_sync_output_dma()
555 comedi_buf_read_alloc(s, async->prealloc_bufsz); mite_sync_output_dma()
564 async->events |= COMEDI_CB_OVERFLOW; mite_sync_output_dma()
567 count = nbytes_lb - async->buf_read_count; mite_sync_output_dma()
573 async->events |= COMEDI_CB_BLOCK; mite_sync_output_dma()
ni_labpc_isadma.c
38 struct comedi_cmd *cmd = &s->async->cmd; labpc_suggest_transfer_size()
65 struct comedi_cmd *cmd = &s->async->cmd; labpc_setup_dma()
86 struct comedi_async *async = s->async; labpc_drain_dma() local
87 struct comedi_cmd *cmd = &async->cmd; labpc_drain_dma()
usbduxsigma.c
80 * Size of the async input-buffer IN BYTES, the DIO state is transmitted
214 struct comedi_async *async = s->async; usbduxsigma_ai_handle_urb() local
215 struct comedi_cmd *cmd = &async->cmd; usbduxsigma_ai_handle_urb()
236 async->scans_done >= cmd->stop_arg) usbduxsigma_ai_handle_urb()
237 async->events |= COMEDI_CB_EOA; usbduxsigma_ai_handle_urb()
241 if (!(async->events & COMEDI_CB_CANCEL_MASK)) { usbduxsigma_ai_handle_urb()
250 async->events |= COMEDI_CB_ERROR; usbduxsigma_ai_handle_urb()
260 struct comedi_async *async = s->async; usbduxsigma_ai_urb_complete() local
288 async->events |= COMEDI_CB_ERROR; usbduxsigma_ai_urb_complete()
295 async->events |= COMEDI_CB_ERROR; usbduxsigma_ai_urb_complete()
303 if (async->events & COMEDI_CB_CANCEL_MASK) usbduxsigma_ai_urb_complete()
337 struct comedi_async *async = s->async; usbduxsigma_ao_handle_urb() local
338 struct comedi_cmd *cmd = &async->cmd; usbduxsigma_ao_handle_urb()
348 async->scans_done >= cmd->stop_arg) { usbduxsigma_ao_handle_urb()
349 async->events |= COMEDI_CB_EOA; usbduxsigma_ao_handle_urb()
362 async->events |= COMEDI_CB_OVERFLOW; usbduxsigma_ao_handle_urb()
373 if (!(async->events & COMEDI_CB_CANCEL_MASK)) { usbduxsigma_ao_handle_urb()
392 async->events |= COMEDI_CB_ERROR; usbduxsigma_ao_handle_urb()
402 struct comedi_async *async = s->async; usbduxsigma_ao_urb_complete() local
418 async->events |= COMEDI_CB_ERROR; usbduxsigma_ao_urb_complete()
425 async->events |= COMEDI_CB_ERROR; usbduxsigma_ao_urb_complete()
433 if (async->events & COMEDI_CB_CANCEL_MASK) usbduxsigma_ao_urb_complete()
613 struct comedi_cmd *cmd = &s->async->cmd; usbduxsigma_ai_inttrig()
629 s->async->inttrig = NULL; usbduxsigma_ai_inttrig()
640 struct comedi_cmd *cmd = &s->async->cmd; usbduxsigma_ai_cmd()
698 s->async->inttrig = NULL; usbduxsigma_ai_cmd()
700 s->async->inttrig = usbduxsigma_ai_inttrig; usbduxsigma_ai_cmd()
818 struct comedi_cmd *cmd = &s->async->cmd; usbduxsigma_ao_inttrig()
834 s->async->inttrig = NULL; usbduxsigma_ao_inttrig()
922 struct comedi_cmd *cmd = &s->async->cmd; usbduxsigma_ao_cmd()
953 s->async->inttrig = NULL; usbduxsigma_ao_cmd()
955 s->async->inttrig = usbduxsigma_ao_inttrig; usbduxsigma_ao_cmd()
usbdux.c
253 struct comedi_async *async = s->async; usbduxsub_ai_handle_urb() local
254 struct comedi_cmd *cmd = &async->cmd; usbduxsub_ai_handle_urb()
277 async->scans_done >= cmd->stop_arg) usbduxsub_ai_handle_urb()
278 async->events |= COMEDI_CB_EOA; usbduxsub_ai_handle_urb()
282 if (!(async->events & COMEDI_CB_CANCEL_MASK)) { usbduxsub_ai_handle_urb()
292 async->events |= COMEDI_CB_ERROR; usbduxsub_ai_handle_urb()
301 struct comedi_async *async = s->async; usbduxsub_ai_isoc_irq() local
330 async->events |= COMEDI_CB_ERROR; usbduxsub_ai_isoc_irq()
338 async->events |= COMEDI_CB_ERROR; usbduxsub_ai_isoc_irq()
346 if (async->events & COMEDI_CB_CANCEL_MASK) usbduxsub_ai_isoc_irq()
381 struct comedi_async *async = s->async; usbduxsub_ao_handle_urb() local
382 struct comedi_cmd *cmd = &async->cmd; usbduxsub_ao_handle_urb()
392 async->scans_done >= cmd->stop_arg) { usbduxsub_ao_handle_urb()
393 async->events |= COMEDI_CB_EOA; usbduxsub_ao_handle_urb()
406 async->events |= COMEDI_CB_OVERFLOW; usbduxsub_ao_handle_urb()
419 if (!(async->events & COMEDI_CB_CANCEL_MASK)) { usbduxsub_ao_handle_urb()
439 async->events |= COMEDI_CB_ERROR; usbduxsub_ao_handle_urb()
448 struct comedi_async *async = s->async; usbduxsub_ao_isoc_irq() local
465 async->events |= COMEDI_CB_ERROR; usbduxsub_ao_isoc_irq()
473 async->events |= COMEDI_CB_ERROR; usbduxsub_ao_isoc_irq()
481 if (async->events & COMEDI_CB_CANCEL_MASK) usbduxsub_ao_isoc_irq()
653 struct comedi_cmd *cmd = &s->async->cmd; usbdux_ai_inttrig()
669 s->async->inttrig = NULL; usbdux_ai_inttrig()
682 struct comedi_cmd *cmd = &s->async->cmd; usbdux_ai_cmd()
740 s->async->inttrig = NULL; usbdux_ai_cmd()
745 s->async->inttrig = usbdux_ai_inttrig; usbdux_ai_cmd()
861 struct comedi_cmd *cmd = &s->async->cmd; usbdux_ao_inttrig()
877 s->async->inttrig = NULL; usbdux_ao_inttrig()
974 struct comedi_cmd *cmd = &s->async->cmd; usbdux_ao_cmd()
1010 s->async->inttrig = NULL; usbdux_ao_cmd()
1015 s->async->inttrig = usbdux_ao_inttrig; usbdux_ao_cmd()
ni_pcidio.c
350 comedi_buf_write_alloc(s, s->async->prealloc_bufsz); setup_mite_dma()
385 struct comedi_async *async = s->async; nidio_interrupt() local
421 async->events |= COMEDI_CB_ERROR; nidio_interrupt()
457 async->events |= COMEDI_CB_EOA; nidio_interrupt()
463 async->events |= COMEDI_CB_ERROR; nidio_interrupt()
468 async->events |= COMEDI_CB_EOA; nidio_interrupt()
472 async->events |= COMEDI_CB_EOA; nidio_interrupt()
621 struct comedi_cmd *cmd = &s->async->cmd; ni_pcidio_inttrig()
627 s->async->inttrig = NULL; ni_pcidio_inttrig()
635 struct comedi_cmd *cmd = &s->async->cmd; ni_pcidio_cmd()
735 s->async->inttrig = NULL; ni_pcidio_cmd()
738 s->async->inttrig = ni_pcidio_inttrig; ni_pcidio_cmd()
763 memset(s->async->prealloc_buf, 0xaa, s->async->prealloc_bufsz); ni_pcidio_change()
adl_pci9118.c
429 struct comedi_cmd *cmd = &s->async->cmd; valid_samples_in_act_dma_buf()
486 struct comedi_cmd *cmd = &s->async->cmd; move_block_from_dma()
556 struct comedi_cmd *cmd = &s->async->cmd; pci9118_calc_divisors()
610 s->async->inttrig = NULL; pci9118_ai_cancel()
641 struct comedi_cmd *cmd = &s->async->cmd; interrupt_pci9118_ai_onesample()
649 if (s->async->scans_done >= cmd->stop_arg) interrupt_pci9118_ai_onesample()
650 s->async->events |= COMEDI_CB_EOA; interrupt_pci9118_ai_onesample()
658 struct comedi_cmd *cmd = &s->async->cmd; interrupt_pci9118_ai_dma()
682 if (s->async->scans_done >= cmd->stop_arg) interrupt_pci9118_ai_dma()
683 s->async->events |= COMEDI_CB_EOA; interrupt_pci9118_ai_dma()
686 if (s->async->events & COMEDI_CB_CANCEL_MASK) interrupt_pci9118_ai_dma()
719 s->async->events |= COMEDI_CB_ERROR; pci9118_interrupt()
725 s->async->events |= COMEDI_CB_ERROR; pci9118_interrupt()
733 s->async->events |= COMEDI_CB_ERROR | COMEDI_CB_OVERFLOW; pci9118_interrupt()
739 s->async->events |= COMEDI_CB_ERROR | COMEDI_CB_OVERFLOW; pci9118_interrupt()
744 s->async->events |= COMEDI_CB_ERROR; pci9118_interrupt()
749 s->async->events |= COMEDI_CB_ERROR | COMEDI_CB_OVERFLOW; pci9118_interrupt()
808 struct comedi_cmd *cmd = &s->async->cmd; pci9118_ai_inttrig()
813 s->async->inttrig = NULL; pci9118_ai_inttrig()
823 struct comedi_cmd *cmd = &s->async->cmd; Compute_and_setup_dma()
831 if (dmalen0 > s->async->prealloc_bufsz) { Compute_and_setup_dma()
833 dmalen0 = s->async->prealloc_bufsz & ~3L; Compute_and_setup_dma()
835 if (dmalen1 > s->async->prealloc_bufsz) { Compute_and_setup_dma()
837 dmalen1 = s->async->prealloc_bufsz & ~3L; Compute_and_setup_dma()
946 struct comedi_cmd *cmd = &s->async->cmd; pci9118_ai_cmd()
1152 /* start async command now or wait for internal trigger */ pci9118_ai_cmd()
1156 s->async->inttrig = pci9118_ai_inttrig; pci9118_ai_cmd()
das16m1.c
252 struct comedi_async *async = s->async; das16m1_cmd_exec() local
253 struct comedi_cmd *cmd = &async->cmd; das16m1_cmd_exec()
401 struct comedi_async *async; das16m1_handler() local
407 async = s->async; das16m1_handler()
408 cmd = &async->cmd; das16m1_handler()
441 async->events |= COMEDI_CB_EOA; das16m1_handler()
448 async->events |= COMEDI_CB_ERROR; das16m1_handler()
cb_pcidas.c
884 struct comedi_async *async = s->async; cb_pcidas_ai_cmd() local
885 struct comedi_cmd *cmd = &async->cmd; cb_pcidas_ai_cmd()
1095 struct comedi_async *async = s->async; cb_pcidas_ao_inttrig() local
1096 struct comedi_cmd *cmd = &async->cmd; cb_pcidas_ao_inttrig()
1118 async->inttrig = NULL; cb_pcidas_ao_inttrig()
1127 struct comedi_async *async = s->async; cb_pcidas_ao_cmd() local
1128 struct comedi_cmd *cmd = &async->cmd; cb_pcidas_ao_cmd()
1173 async->inttrig = cb_pcidas_ao_inttrig; cb_pcidas_ao_cmd()
1203 struct comedi_async *async = s->async; handle_ao_interrupt() local
1204 struct comedi_cmd *cmd = &async->cmd; handle_ao_interrupt()
1215 async->scans_done >= cmd->stop_arg) { handle_ao_interrupt()
1216 async->events |= COMEDI_CB_EOA; handle_ao_interrupt()
1219 async->events |= COMEDI_CB_ERROR; handle_ao_interrupt()
1241 struct comedi_async *async; cb_pcidas_interrupt() local
1252 async = s->async; cb_pcidas_interrupt()
1253 cmd = &async->cmd; cb_pcidas_interrupt()
1281 async->scans_done >= cmd->stop_arg) cb_pcidas_interrupt()
1282 async->events |= COMEDI_CB_EOA; cb_pcidas_interrupt()
1302 async->scans_done >= cmd->stop_arg) { cb_pcidas_interrupt()
1303 async->events |= COMEDI_CB_EOA; cb_pcidas_interrupt()
1329 async->events |= COMEDI_CB_ERROR; cb_pcidas_interrupt()
comedi_test.c
172 struct comedi_async *async = s->async; waveform_ai_interrupt() local
173 struct comedi_cmd *cmd = &async->cmd; waveform_ai_interrupt()
210 if (cmd->stop_src == TRIG_COUNT && async->scans_done >= cmd->stop_arg) waveform_ai_interrupt()
211 async->events |= COMEDI_CB_EOA; waveform_ai_interrupt()
304 struct comedi_cmd *cmd = &s->async->cmd; waveform_ai_cmd()
das6402.c
31 * [1] - IRQ (optional, needed for async command support)
185 struct comedi_async *async = s->async; das6402_interrupt() local
186 struct comedi_cmd *cmd = &async->cmd; das6402_interrupt()
194 async->events |= COMEDI_CB_OVERFLOW; das6402_interrupt()
202 async->scans_done >= cmd->stop_arg) das6402_interrupt()
203 async->events |= COMEDI_CB_EOA; das6402_interrupt()
234 struct comedi_cmd *cmd = &s->async->cmd; das6402_ai_cmd()
addi_apci_3120.c
214 struct comedi_cmd *cmd = &s->async->cmd; apci3120_setup_dma()
250 if (dmalen0 > s->async->prealloc_bufsz) apci3120_setup_dma()
251 dmalen0 = s->async->prealloc_bufsz; apci3120_setup_dma()
252 if (dmalen1 > s->async->prealloc_bufsz) apci3120_setup_dma()
253 dmalen1 = s->async->prealloc_bufsz; apci3120_setup_dma()
435 struct comedi_async *async = s->async; apci3120_interrupt_dma() local
436 struct comedi_cmd *cmd = &async->cmd; apci3120_interrupt_dma()
449 async->events |= COMEDI_CB_ERROR; apci3120_interrupt_dma()
458 async->events |= COMEDI_CB_EOS; apci3120_interrupt_dma()
461 if ((async->events & COMEDI_CB_CANCEL_MASK) || apci3120_interrupt_dma()
462 (cmd->stop_src == TRIG_COUNT && async->scans_done >= cmd->stop_arg)) apci3120_interrupt_dma()
481 struct comedi_async *async = s->async; apci3120_interrupt() local
482 struct comedi_cmd *cmd = &async->cmd; apci3120_interrupt()
540 if (cmd->stop_src == TRIG_COUNT && async->scans_done >= cmd->stop_arg) apci3120_interrupt()
541 async->events |= COMEDI_CB_EOA; apci3120_interrupt()
552 struct comedi_cmd *cmd = &s->async->cmd; apci3120_ai_cmd()
adv_pci1710.c
537 struct comedi_cmd *cmd = &s->async->cmd; pci1710_handle_every_sample()
545 s->async->events |= COMEDI_CB_ERROR; pci1710_handle_every_sample()
551 s->async->events |= COMEDI_CB_ERROR; pci1710_handle_every_sample()
558 ret = pci171x_ai_read_sample(dev, s, s->async->cur_chan, &val); pci1710_handle_every_sample()
560 s->async->events |= COMEDI_CB_ERROR; pci1710_handle_every_sample()
567 s->async->scans_done >= cmd->stop_arg) { pci1710_handle_every_sample()
568 s->async->events |= COMEDI_CB_EOA; pci1710_handle_every_sample()
580 struct comedi_async *async = s->async; pci1710_handle_fifo() local
581 struct comedi_cmd *cmd = &async->cmd; pci1710_handle_fifo()
588 async->events |= COMEDI_CB_ERROR; pci1710_handle_fifo()
594 async->events |= COMEDI_CB_ERROR; pci1710_handle_fifo()
602 ret = pci171x_ai_read_sample(dev, s, s->async->cur_chan, &val); pci1710_handle_fifo()
604 s->async->events |= COMEDI_CB_ERROR; pci1710_handle_fifo()
612 async->scans_done >= cmd->stop_arg) { pci1710_handle_fifo()
613 async->events |= COMEDI_CB_EOA; pci1710_handle_fifo()
632 cmd = &s->async->cmd; interrupt_service_pci1710()
665 struct comedi_cmd *cmd = &s->async->cmd; pci171x_ai_cmd()
usbduxfast.c
241 struct comedi_async *async = s->async; usbduxfast_ai_handle_urb() local
242 struct comedi_cmd *cmd = &async->cmd; usbduxfast_ai_handle_urb()
255 async->scans_done >= cmd->stop_arg) usbduxfast_ai_handle_urb()
256 async->events |= COMEDI_CB_EOA; usbduxfast_ai_handle_urb()
260 if (!(async->events & COMEDI_CB_CANCEL_MASK)) { usbduxfast_ai_handle_urb()
266 async->events |= COMEDI_CB_ERROR; usbduxfast_ai_handle_urb()
275 struct comedi_async *async = s->async; usbduxfast_ai_interrupt() local
292 async->events |= COMEDI_CB_ERROR; usbduxfast_ai_interrupt()
300 async->events |= COMEDI_CB_ERROR; usbduxfast_ai_interrupt()
308 if (async->events & COMEDI_CB_CANCEL_MASK) usbduxfast_ai_interrupt()
430 struct comedi_cmd *cmd = &s->async->cmd; usbduxfast_ai_inttrig()
450 s->async->inttrig = NULL; usbduxfast_ai_inttrig()
462 struct comedi_cmd *cmd = &s->async->cmd; usbduxfast_ai_cmd()
796 s->async->inttrig = NULL; usbduxfast_ai_cmd()
798 s->async->inttrig = usbduxfast_ai_inttrig; usbduxfast_ai_cmd()
825 "ai_insn_read not possible, async cmd is running\n"); usbduxfast_ai_insn_read()
das800.c
379 struct comedi_async *async = s->async; das800_ai_do_cmd() local
380 struct comedi_cmd *cmd = &async->cmd; das800_ai_do_cmd()
435 struct comedi_async *async; das800_interrupt() local
450 async = s->async; das800_interrupt()
451 cmd = &async->cmd; das800_interrupt()
487 async->scans_done >= cmd->stop_arg) { das800_interrupt()
488 async->events |= COMEDI_CB_EOA; das800_interrupt()
495 async->events |= COMEDI_CB_ERROR; das800_interrupt()
500 if (!(async->events & COMEDI_CB_CANCEL_MASK)) { das800_interrupt()
amplc_pci230.c
1019 cmd = &s->async->cmd; pci230_ao_stop()
1061 struct comedi_async *async = s->async; pci230_handle_ao_nofifo() local
1062 struct comedi_cmd *cmd = &async->cmd; pci230_handle_ao_nofifo()
1066 if (cmd->stop_src == TRIG_COUNT && async->scans_done >= cmd->stop_arg) pci230_handle_ao_nofifo()
1073 async->events |= COMEDI_CB_OVERFLOW; pci230_handle_ao_nofifo()
1080 if (cmd->stop_src == TRIG_COUNT && async->scans_done >= cmd->stop_arg) pci230_handle_ao_nofifo()
1081 async->events |= COMEDI_CB_EOA; pci230_handle_ao_nofifo()
1092 struct comedi_async *async = s->async; pci230_handle_ao_fifo() local
1093 struct comedi_cmd *cmd = &async->cmd; pci230_handle_ao_fifo()
1151 async->scans_done >= cmd->stop_arg) { pci230_handle_ao_fifo()
1168 async->events |= events; pci230_handle_ao_fifo()
1169 return !(async->events & COMEDI_CB_CANCEL_MASK); pci230_handle_ao_fifo()
1209 struct comedi_async *async = s->async; pci230_ao_start() local
1210 struct comedi_cmd *cmd = &async->cmd; pci230_ao_start()
1269 async->inttrig = pci230_ao_inttrig_scan_begin; pci230_ao_start()
1285 struct comedi_cmd *cmd = &s->async->cmd; pci230_ao_inttrig_start()
1290 s->async->inttrig = NULL; pci230_ao_inttrig_start()
1303 struct comedi_cmd *cmd = &s->async->cmd; pci230_ao_cmd()
1361 s->async->inttrig = pci230_ao_inttrig_start; pci230_ao_cmd()
1710 struct comedi_cmd *cmd = &s->async->cmd; pci230_ai_update_fifo_trigger_level()
1716 wake = cmd->scan_end_arg - s->async->cur_chan; pci230_ai_update_fifo_trigger_level()
1819 cmd = &s->async->cmd; pci230_ai_stop()
1859 struct comedi_async *async = s->async; pci230_ai_start() local
1860 struct comedi_cmd *cmd = &async->cmd; pci230_ai_start()
1913 async->inttrig = pci230_ai_inttrig_convert; pci230_ai_start()
1981 async->inttrig = pci230_ai_inttrig_scan_begin; pci230_ai_start()
1995 struct comedi_cmd *cmd = &s->async->cmd; pci230_ai_inttrig_start()
2000 s->async->inttrig = NULL; pci230_ai_inttrig_start()
2010 struct comedi_async *async = s->async; pci230_handle_ai() local
2011 struct comedi_cmd *cmd = &async->cmd; pci230_handle_ai()
2034 async->events |= COMEDI_CB_ERROR; pci230_handle_ai()
2061 async->scans_done >= cmd->stop_arg) { pci230_handle_ai()
2062 async->events |= COMEDI_CB_EOA; pci230_handle_ai()
2068 if (!(async->events & COMEDI_CB_CANCEL_MASK)) pci230_handle_ai()
2081 struct comedi_async *async = s->async; pci230_ai_cmd() local
2082 struct comedi_cmd *cmd = &async->cmd; pci230_ai_cmd()
2260 s->async->inttrig = pci230_ai_inttrig_start; pci230_ai_cmd()
addi_apci_2032.c
136 struct comedi_cmd *cmd = &s->async->cmd; apci2032_int_cmd()
175 struct comedi_cmd *cmd = &s->async->cmd; apci2032_interrupt()
214 s->async->scans_done >= cmd->stop_arg) apci2032_interrupt()
215 s->async->events |= COMEDI_CB_EOA; apci2032_interrupt()
ni_at_a2150.c
159 struct comedi_async *async = s->async; a2150_interrupt() local
160 struct comedi_cmd *cmd = &async->cmd; a2150_interrupt()
175 async->events |= COMEDI_CB_ERROR; a2150_interrupt()
180 async->events |= COMEDI_CB_ERROR; a2150_interrupt()
222 async->events |= COMEDI_CB_EOA; a2150_interrupt()
488 struct comedi_async *async = s->async; a2150_ai_cmd() local
489 struct comedi_cmd *cmd = &async->cmd; a2150_ai_cmd()
723 /* an IRQ and DMA are required to support async commands */ a2150_attach()
ni_labpc_common.c
624 struct comedi_async *async = s->async; labpc_ai_cmd() local
625 struct comedi_cmd *cmd = &async->cmd; labpc_ai_cmd()
769 struct comedi_async *async = dev->read_subdev->async; labpc_drain_fifo() local
770 struct comedi_cmd *cmd = &async->cmd; labpc_drain_fifo()
791 async->events |= COMEDI_CB_ERROR; labpc_drain_fifo()
817 struct comedi_async *async; labpc_interrupt() local
825 async = s->async; labpc_interrupt()
826 cmd = &async->cmd; labpc_interrupt()
843 async->events |= COMEDI_CB_ERROR; labpc_interrupt()
863 async->events |= COMEDI_CB_ERROR; labpc_interrupt()
872 async->events |= COMEDI_CB_EOA; labpc_interrupt()
879 async->events |= COMEDI_CB_EOA; labpc_interrupt()
adl_pci9111.c
364 struct comedi_cmd *cmd = &s->async->cmd; pci9111_ai_do_cmd()
436 struct comedi_cmd *cmd = &s->async->cmd; pci9111_handle_fifo_half_full()
483 struct comedi_async *async; pci9111_interrupt() local
495 async = s->async; pci9111_interrupt()
496 cmd = &async->cmd; pci9111_interrupt()
521 async->events |= COMEDI_CB_ERROR; pci9111_interrupt()
532 if (cmd->stop_src == TRIG_COUNT && async->scans_done >= cmd->stop_arg) pci9111_interrupt()
533 async->events |= COMEDI_CB_EOA; pci9111_interrupt()
gsc_hpdi.c
173 struct comedi_cmd *cmd = &s->async->cmd; gsc_hpdi_drain_dma()
213 struct comedi_async *async = s->async; gsc_hpdi_interrupt() local
262 async->events |= COMEDI_CB_ERROR; gsc_hpdi_interrupt()
267 async->events |= COMEDI_CB_ERROR; gsc_hpdi_interrupt()
271 async->events |= COMEDI_CB_EOA; gsc_hpdi_interrupt()
306 struct comedi_async *async = s->async; gsc_hpdi_cmd() local
307 struct comedi_cmd *cmd = &async->cmd; gsc_hpdi_cmd()
pcmmio.c
325 s->async->inttrig = NULL; pcmmio_stop_intr()
336 struct comedi_cmd *cmd = &s->async->cmd; pcmmio_handle_dio_intr()
359 s->async->scans_done >= cmd->stop_arg) pcmmio_handle_dio_intr()
360 s->async->events |= COMEDI_CB_EOA; pcmmio_handle_dio_intr()
394 struct comedi_cmd *cmd = &s->async->cmd; pcmmio_start_intr()
438 struct comedi_cmd *cmd = &s->async->cmd; pcmmio_inttrig_start_intr()
445 s->async->inttrig = NULL; pcmmio_inttrig_start_intr()
459 struct comedi_cmd *cmd = &s->async->cmd; pcmmio_cmd()
467 s->async->inttrig = pcmmio_inttrig_start_intr; pcmmio_cmd()
pcmuio.c
302 s->async->inttrig = NULL; pcmuio_stop_intr()
315 struct comedi_cmd *cmd = &s->async->cmd; pcmuio_handle_intr_subdev()
338 s->async->scans_done >= cmd->stop_arg) pcmuio_handle_intr_subdev()
339 s->async->events |= COMEDI_CB_EOA; pcmuio_handle_intr_subdev()
390 struct comedi_cmd *cmd = &s->async->cmd; pcmuio_start_intr()
436 struct comedi_cmd *cmd = &s->async->cmd; pcmuio_inttrig_start_intr()
445 s->async->inttrig = NULL; pcmuio_inttrig_start_intr()
460 struct comedi_cmd *cmd = &s->async->cmd; pcmuio_cmd()
470 s->async->inttrig = pcmuio_inttrig_start_intr; pcmuio_cmd()
dt282x.c
34 * [1] - IRQ (optional, required for async command support)
35 * [2] - DMA 1 (optional, required for async command support)
36 * [3] - DMA 2 (optional, required for async command support)
449 s->async->events |= COMEDI_CB_OVERFLOW; dt282x_ao_dma_interrupt()
479 s->async->events |= COMEDI_CB_EOA; dt282x_ai_dma_interrupt()
523 s->async->events |= COMEDI_CB_ERROR; dt282x_interrupt()
529 s_ao->async->events |= COMEDI_CB_ERROR; dt282x_interrupt()
545 s->async->events |= COMEDI_CB_EOA; dt282x_interrupt()
721 struct comedi_cmd *cmd = &s->async->cmd; dt282x_ai_cmd()
885 struct comedi_cmd *cmd = &s->async->cmd; dt282x_ao_inttrig()
898 s->async->inttrig = NULL; dt282x_ao_inttrig()
907 struct comedi_cmd *cmd = &s->async->cmd; dt282x_ao_cmd()
936 s->async->inttrig = dt282x_ao_inttrig; dt282x_ao_cmd()
1120 /* an IRQ and 2 DMA channels are required for async command support */ dt282x_attach()
das1800.c
483 struct comedi_cmd *cmd = &s->async->cmd; das1800_handle_fifo_not_empty()
496 s->async->scans_done >= cmd->stop_arg) das1800_handle_fifo_not_empty()
591 struct comedi_async *async = s->async; das1800_ai_handler() local
592 struct comedi_cmd *cmd = &async->cmd; das1800_ai_handler()
612 async->events |= COMEDI_CB_ERROR; das1800_ai_handler()
626 async->events |= COMEDI_CB_EOA; das1800_ai_handler()
628 async->scans_done >= cmd->stop_arg) { das1800_ai_handler()
629 async->events |= COMEDI_CB_EOA; das1800_ai_handler()
907 struct comedi_cmd *cmd = &s->async->cmd; das1800_ai_transfer_size()
995 struct comedi_async *async = s->async; das1800_ai_do_cmd() local
996 const struct comedi_cmd *cmd = &async->cmd; das1800_ai_do_cmd()
pcl818.c
438 struct comedi_cmd *cmd = &s->async->cmd; pcl818_ai_write_sample()
448 s->async->events |= COMEDI_CB_ERROR; pcl818_ai_write_sample()
459 s->async->scans_done >= cmd->stop_arg) { pcl818_ai_write_sample()
460 s->async->events |= COMEDI_CB_EOA; pcl818_ai_write_sample()
475 s->async->events |= COMEDI_CB_ERROR; pcl818_handle_eoc()
520 s->async->events |= COMEDI_CB_ERROR; pcl818_handle_fifo()
527 s->async->events |= COMEDI_CB_ERROR; pcl818_handle_fifo()
548 struct comedi_cmd *cmd = &s->async->cmd; pcl818_interrupt()
562 s->async->scans_done = cmd->stop_arg; pcl818_interrupt()
722 struct comedi_cmd *cmd = &s->async->cmd; pcl818_ai_cmd()
773 struct comedi_cmd *cmd = &s->async->cmd; pcl818_ai_cancel()
781 s->async->scans_done < cmd->stop_arg)) { pcl818_ai_cancel()
1007 /* we can use IRQ 2-7 for async command support */ pcl818_attach()
amplc_dio200_common.c
222 struct comedi_cmd *cmd = &s->async->cmd; dio200_start_intr()
244 struct comedi_cmd *cmd = &s->async->cmd; dio200_inttrig_start_intr()
251 s->async->inttrig = NULL; dio200_inttrig_start_intr()
264 struct comedi_cmd *cmd = &s->async->cmd; dio200_read_scan_intr()
278 s->async->scans_done >= cmd->stop_arg) dio200_read_scan_intr()
279 s->async->events |= COMEDI_CB_EOA; dio200_read_scan_intr()
420 struct comedi_cmd *cmd = &s->async->cmd; dio200_subdev_intr_cmd()
429 s->async->inttrig = dio200_inttrig_start_intr; dio200_subdev_intr_cmd()
pcl711.c
194 struct comedi_cmd *cmd = &s->async->cmd; pcl711_interrupt()
209 s->async->scans_done >= cmd->stop_arg) pcl711_interrupt()
210 s->async->events |= COMEDI_CB_EOA; pcl711_interrupt()
349 struct comedi_cmd *cmd = &s->async->cmd; pcl711_ai_cmd()
amplc_pci224.c
484 struct comedi_cmd *cmd = &s->async->cmd; pci224_ao_start()
507 struct comedi_cmd *cmd = &s->async->cmd; pci224_ao_handle_fifo()
519 s->async->scans_done >= cmd->stop_arg) { pci224_ao_handle_fifo()
521 s->async->events |= COMEDI_CB_EOA; pci224_ao_handle_fifo()
541 s->async->events |= COMEDI_CB_OVERFLOW; pci224_ao_handle_fifo()
561 s->async->scans_done >= cmd->stop_arg) { pci224_ao_handle_fifo()
608 struct comedi_cmd *cmd = &s->async->cmd; pci224_ao_inttrig_start()
613 s->async->inttrig = NULL; pci224_ao_inttrig_start()
831 struct comedi_cmd *cmd = &s->async->cmd; pci224_ao_cmd()
886 s->async->inttrig = pci224_ao_inttrig_start; pci224_ao_cmd()
915 struct comedi_cmd *cmd = &s->async->cmd; pci224_ao_munge()
964 cmd = &s->async->cmd; pci224_interrupt()
pcl816.c
223 struct comedi_cmd *cmd = &s->async->cmd; pcl816_ai_next_chan()
226 s->async->scans_done >= cmd->stop_arg) { pcl816_ai_next_chan()
227 s->async->events |= COMEDI_CB_EOA; pcl816_ai_next_chan()
419 struct comedi_cmd *cmd = &s->async->cmd; pcl816_ai_cmd()
626 /* an IRQ and DMA are required to support async commands */ pcl816_attach()
ni_mio_common.c
1174 s->async->events |= COMEDI_CB_OVERFLOW; ni_ao_fifo_half_empty()
1218 struct comedi_async *async = s->async; ni_ai_fifo_read() local
1261 async->events |= COMEDI_CB_ERROR; ni_ai_fifo_read()
1400 s->async->events |= COMEDI_CB_EOA; shutdown_ai_command()
1414 if ((s->async->events & COMEDI_CB_EOS)) ni_handle_eos()
1420 s->async->events |= COMEDI_CB_EOS; ni_handle_eos()
1464 struct comedi_cmd *cmd = &s->async->cmd; handle_a_interrupt()
1480 s->async->events |= COMEDI_CB_ERROR; handle_a_interrupt()
1493 s->async->events |= COMEDI_CB_ERROR; handle_a_interrupt()
1505 s->async->events |= COMEDI_CB_ERROR; handle_a_interrupt()
1507 s->async->events |= COMEDI_CB_OVERFLOW; handle_a_interrupt()
1580 s->async->events |= COMEDI_CB_ERROR; handle_b_interrupt()
1590 s->async->events |= COMEDI_CB_OVERFLOW; handle_b_interrupt()
1594 s->async->events |= COMEDI_CB_EOA; handle_b_interrupt()
1606 s->async->events |= COMEDI_CB_OVERFLOW; handle_b_interrupt()
1619 struct comedi_async *async = s->async; ni_ai_munge() local
1620 struct comedi_cmd *cmd = &async->cmd; ni_ai_munge()
1656 comedi_buf_write_alloc(s, s->async->prealloc_bufsz); ni_ai_setup_MITE_dma()
1690 comedi_buf_read_alloc(s, s->async->prealloc_bufsz); ni_ao_setup_MITE_dma()
2372 struct comedi_cmd *cmd = &s->async->cmd; ni_ai_inttrig()
2379 s->async->inttrig = NULL; ni_ai_inttrig()
2387 const struct comedi_cmd *cmd = &s->async->cmd; ni_ai_cmd()
2672 s->async->inttrig = NULL; ni_ai_cmd()
2674 s->async->inttrig = NULL; ni_ai_cmd()
2676 s->async->inttrig = ni_ai_inttrig; ni_ai_cmd()
2735 struct comedi_cmd *cmd = &s->async->cmd; ni_ao_munge()
2984 struct comedi_cmd *cmd = &s->async->cmd; ni_ao_inttrig()
2996 s->async->inttrig = NULL; ni_ao_inttrig()
3057 const struct comedi_cmd *cmd = &s->async->cmd; ni_ao_cmd()
3248 s->async->inttrig = ni_ao_inttrig; ni_ao_cmd()
3523 struct comedi_cmd *cmd = &s->async->cmd; ni_cdo_inttrig()
3535 s->async->inttrig = NULL; ni_cdo_inttrig()
3538 comedi_buf_read_alloc(s, s->async->prealloc_bufsz); ni_cdo_inttrig()
3576 const struct comedi_cmd *cmd = &s->async->cmd; ni_cdio_cmd()
3607 s->async->inttrig = ni_cdo_inttrig; ni_cdio_cmd()
3658 s->async->events |= COMEDI_CB_OVERFLOW; handle_cdio_interrupt()
3663 /* s->async->events |= COMEDI_CB_EOA; */ handle_cdio_interrupt()
5444 * async command support. ni_E_init()
quatech_daqp_cs.c
189 struct comedi_cmd *cmd = &s->async->cmd; daqp_interrupt()
207 s->async->events |= COMEDI_CB_OVERFLOW; daqp_interrupt()
223 s->async->scans_done >= cmd->stop_arg) { daqp_interrupt()
224 s->async->events |= COMEDI_CB_EOA; daqp_interrupt()
235 s->async->events |= COMEDI_CB_ERROR; daqp_interrupt()
449 struct comedi_cmd *cmd = &s->async->cmd; daqp_ai_cmd()
das16.c
476 struct comedi_async *async = s->async; das16_interrupt() local
477 struct comedi_cmd *cmd = &async->cmd; das16_interrupt()
502 async->events |= COMEDI_CB_ERROR; das16_interrupt()
519 if (cmd->stop_src == TRIG_COUNT && async->scans_done >= cmd->stop_arg) das16_interrupt()
520 async->events |= COMEDI_CB_EOA; das16_interrupt()
704 struct comedi_async *async = s->async; das16_cmd_exec() local
705 struct comedi_cmd *cmd = &async->cmd; das16_cmd_exec()
dmm32at.c
373 struct comedi_cmd *cmd = &s->async->cmd; dmm32at_ai_cmd()
426 struct comedi_cmd *cmd = &s->async->cmd; dmm32at_isr()
434 s->async->scans_done >= cmd->stop_arg) dmm32at_isr()
435 s->async->events |= COMEDI_CB_EOA; dmm32at_isr()
cb_pcidas64.c
2541 struct comedi_async *async = s->async; ai_cmd() local
2542 struct comedi_cmd *cmd = &async->cmd; ai_cmd()
2798 struct comedi_async *async = s->async; handle_ai_interrupt() local
2799 struct comedi_cmd *cmd = &async->cmd; handle_ai_interrupt()
2806 async->events |= COMEDI_CB_ERROR; handle_ai_interrupt()
2835 async->scans_done >= cmd->stop_arg) || handle_ai_interrupt()
2837 async->events |= COMEDI_CB_EOA; handle_ai_interrupt()
2975 struct comedi_async *async; handle_ao_interrupt() local
2983 async = s->async; handle_ao_interrupt()
2984 cmd = &async->cmd; handle_ao_interrupt()
3010 async->scans_done >= cmd->stop_arg) || handle_ao_interrupt()
3012 async->events |= COMEDI_CB_EOA; handle_ao_interrupt()
3014 async->events |= COMEDI_CB_ERROR; handle_ao_interrupt()
3209 s->async->scans_done >= cmd->stop_arg) prep_ao_dma()
3241 struct comedi_cmd *cmd = &s->async->cmd; ao_inttrig()
3256 s->async->inttrig = NULL; ao_inttrig()
3264 struct comedi_cmd *cmd = &s->async->cmd; ao_cmd()
3281 s->async->inttrig = ao_inttrig; ao_cmd()
dt2814.c
182 struct comedi_cmd *cmd = &s->async->cmd; dt2814_ai_cmd()
228 s->async->events |= COMEDI_CB_EOA; dt2814_interrupt()
pcl812.c
705 struct comedi_cmd *cmd = &s->async->cmd; pcl812_ai_cmd()
759 struct comedi_cmd *cmd = &s->async->cmd; pcl812_ai_next_chan()
762 s->async->scans_done >= cmd->stop_arg) { pcl812_ai_next_chan()
763 s->async->events |= COMEDI_CB_EOA; pcl812_ai_next_chan()
773 struct comedi_cmd *cmd = &s->async->cmd; pcl812_handle_eoc()
774 unsigned int chan = s->async->cur_chan; pcl812_handle_eoc()
780 s->async->events |= COMEDI_CB_ERROR; pcl812_handle_eoc()
788 next_chan = s->async->cur_chan; pcl812_handle_eoc()
dt3000.c
355 s->async->events |= COMEDI_CB_ERROR; dt3k_interrupt()
359 s->async->events |= COMEDI_CB_EOA; dt3k_interrupt()
479 struct comedi_cmd *cmd = &s->async->cmd; dt3k_ai_cmd()
s626.c
1349 struct comedi_cmd *cmd = &s->async->cmd; s626_handle_dio_interrupt()
1412 struct comedi_async *async = s->async; s626_check_counter_interrupts() local
1413 struct comedi_cmd *cmd = &async->cmd; s626_check_counter_interrupts()
1472 struct comedi_async *async = s->async; s626_handle_eos_interrupt() local
1473 struct comedi_cmd *cmd = &async->cmd; s626_handle_eos_interrupt()
1496 if (cmd->stop_src == TRIG_COUNT && async->scans_done >= cmd->stop_arg) s626_handle_eos_interrupt()
1497 async->events |= COMEDI_CB_EOA; s626_handle_eos_interrupt()
1499 if (async->events & COMEDI_CB_CANCEL_MASK) s626_handle_eos_interrupt()
1559 struct comedi_cmd *cmd = &s->async->cmd; s626_reset_adc()
1925 struct comedi_cmd *cmd = &s->async->cmd; s626_ai_inttrig()
1933 s->async->inttrig = NULL; s626_ai_inttrig()
2014 struct comedi_cmd *cmd = &s->async->cmd; s626_ai_cmd()
2091 s->async->inttrig = NULL; s626_ai_cmd()
2096 s->async->inttrig = NULL; s626_ai_cmd()
2099 s->async->inttrig = s626_ai_inttrig; s626_ai_cmd()
aio_iiro_16.c
29 * The sample data returned by the async command indicates which inputs
rtd520.c
603 struct comedi_async *async = s->async; ai_read_n() local
604 struct comedi_cmd *cmd = &async->cmd; ai_read_n()
608 unsigned int range = CR_RANGE(cmd->chanlist[async->cur_chan]); ai_read_n()
700 s->async->events |= COMEDI_CB_ERROR; rtd_interrupt()
703 s->async->events |= COMEDI_CB_EOA; rtd_interrupt()
871 The data gets stored in the async structure of the subdevice.
878 struct comedi_cmd *cmd = &s->async->cmd; rtd_ai_cmd()
addi_apci_1500.c
293 struct comedi_cmd *cmd = &s->async->cmd; apci1500_di_inttrig_start()
374 s->async->inttrig = apci1500_di_inttrig_start; apci1500_di_cmd()
436 * input async command is started.
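
One idiom recurs across nearly all of the comedi driver hits above: when a command's start_src is TRIG_INT, the command setup parks a handler in s->async->inttrig, and the handler disarms itself once the soft trigger arrives. A hedged sketch of that shape, assuming the in-tree comedi headers; my_inttrig, my_cmd, and the start logic are illustrative, not any one driver:

#include "../comedidev.h"

static int my_inttrig(struct comedi_device *dev,
		      struct comedi_subdevice *s, unsigned int trig_num)
{
	struct comedi_cmd *cmd = &s->async->cmd;

	if (trig_num != cmd->start_arg)
		return -EINVAL;

	/* ...start the acquisition hardware here... */

	s->async->inttrig = NULL;	/* one-shot: disarm the trigger */
	return 1;	/* number of data elements consumed */
}

static int my_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
{
	struct comedi_cmd *cmd = &s->async->cmd;

	if (cmd->start_src == TRIG_INT)
		s->async->inttrig = my_inttrig;	/* wait for INSN_INTTRIG */
	else
		s->async->inttrig = NULL;	/* e.g. TRIG_NOW: start immediately */

	return 0;
}
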
/linux-4.1.27/arch/powerpc/platforms/powernv/
opal-async.c
157 const __be32 *async; opal_async_comp_init() local
167 async = of_get_property(opal_node, "opal-msg-async-num", NULL); opal_async_comp_init()
168 if (!async) { opal_async_comp_init()
169 pr_err("%s: %s has no opal-msg-async-num\n", opal_async_comp_init()
175 opal_max_async_tokens = be32_to_cpup(async); opal_async_comp_init()
opal-sensor.c
53 pr_err("%s: Failed to wait for the async response, %d\n", opal_get_sensor_data()
opal-sysparam.c
63 pr_err("%s: Failed to wait for the async response, %zd\n", opal_get_sys_param()
97 pr_err("%s: Failed to wait for the async response, %d\n", opal_set_sys_param()
/linux-4.1.27/include/media/
sh_mobile_csi2.h
36 const char *name; /* async matching: client name */
v4l2-async.h
60 /* v4l2-async core private: not to be used by drivers */
/linux-4.1.27/include/net/irda/
wrapper.h
5 * Description: IrDA SIR async wrapper layer
45 /* States for receiving a frame in async mode */
/linux-4.1.27/drivers/usb/host/
oxu210hp.h
82 #define HCC_CANPARK(p) ((p)&(1 << 2)) /* true: can park on async qh */
94 #define CMD_PARK (1<<11) /* enable "park" on async qh */
97 #define CMD_IAAD (1<<6) /* "doorbell" interrupt async advance */
98 #define CMD_ASE (1<<5) /* async schedule enable */
112 #define STS_IAA (1<<5) /* Interrupted on async advance */
131 u32 async_next; /* address of next async queue head */
248 /* next async queue entry, or pointer to interrupt/periodic QH */
251 /* for periodic/async schedules and qtd lists, mark end of list */
260 * For entries in the async schedule, the type tag always says "qh".
273 * These appear in both the async and (for interrupt) periodic schedules.
391 /* async schedule support */
392 struct ehci_qh *async; member in struct:oxu_hcd
437 #define EHCI_ASYNC_JIFFIES (HZ/20) /* async idle timeout */
438 #define EHCI_SHRINK_JIFFIES (HZ/200) /* async qh unlink delay */
fotg210.h
33 * fotg210_hcd: async, unlink, periodic (and shadow), ...
61 FOTG210_HRTIMER_POLL_ASS, /* Poll for async schedule off */
66 FOTG210_HRTIMER_ASYNC_UNLINKS, /* Unlink empty async QHs */
69 FOTG210_HRTIMER_DISABLE_ASYNC, /* Wait to disable async sched */
103 /* async schedule support */
104 struct fotg210_qh *async; member in struct:fotg210_hcd
110 unsigned async_count; /* async activity count */
213 #define HCC_CANPARK(p) ((p)&(1 << 2)) /* true: can park on async qh */
227 #define CMD_PARK (1<<11) /* enable "park" on async qh */
229 #define CMD_IAAD (1<<6) /* "doorbell" interrupt async advance */
230 #define CMD_ASE (1<<5) /* async schedule enable */
244 #define STS_IAA (1<<5) /* Interrupted on async advance */
261 u32 async_next; /* address of next async queue head */
366 /* next async queue entry, or pointer to interrupt/periodic QH */
370 /* for periodic/async schedules and qtd lists, mark end of list */
380 * For entries in the async schedule, the type tag always says "qh".
397 * These appear in both the async and (for interrupt) periodic schedules.
405 #define QH_HEAD (1 << 15) /* Head of async reclamation list */
fusbh200.h
33 * fusbh200_hcd: async, unlink, periodic (and shadow), ...
61 FUSBH200_HRTIMER_POLL_ASS, /* Poll for async schedule off */
66 FUSBH200_HRTIMER_ASYNC_UNLINKS, /* Unlink empty async QHs */
69 FUSBH200_HRTIMER_DISABLE_ASYNC, /* Wait to disable async sched */
103 /* async schedule support */
104 struct fusbh200_qh *async; member in struct:fusbh200_hcd
110 unsigned async_count; /* async activity count */
208 #define HCC_CANPARK(p) ((p)&(1 << 2)) /* true: can park on async qh */
222 #define CMD_PARK (1<<11) /* enable "park" on async qh */
224 #define CMD_IAAD (1<<6) /* "doorbell" interrupt async advance */
225 #define CMD_ASE (1<<5) /* async schedule enable */
239 #define STS_IAA (1<<5) /* Interrupted on async advance */
256 u32 async_next; /* address of next async queue head */
358 /* next async queue entry, or pointer to interrupt/periodic QH */
361 /* for periodic/async schedules and qtd lists, mark end of list */
370 * For entries in the async schedule, the type tag always says "qh".
387 * These appear in both the async and (for interrupt) periodic schedules.
395 #define QH_HEAD (1 << 15) /* Head of async reclamation list */
ehci-mem.c
26 * - async and periodic schedules, shared by HC and HCD ... these
122 if (ehci->async) ehci_mem_cleanup()
123 qh_destroy(ehci, ehci->async); ehci_mem_cleanup()
124 ehci->async = NULL; ehci_mem_cleanup()
183 ehci->async = ehci_qh_alloc (ehci, flags); ehci_mem_init()
184 if (!ehci->async) { ehci_mem_init()
H A Dehci-q.c164 /* If an async split transaction gets an error or is unlinked, ehci_clear_tt_buffer()
443 * async transaction in the TT buffer. qh_completions()
667 qtd->hw_alt_next = ehci->async->hw->hw_alt_next; qh_urb_transaction()
953 /* Stop waiting to turn off the async schedule */ enable_async()
966 /* The async schedule and unlink lists are supposed to be empty */ disable_async()
967 WARN_ON(ehci->async->qh_next.qh || !list_empty(&ehci->async_unlink) || disable_async()
974 /* move qh (and its qtds) onto async queue; maybe enable queue. */
991 head = ehci->async; qh_link_async()
1265 prev = ehci->async; single_unlink_async()
1299 /* the async qh for the qtds being unlinked are now gone from the HC */
1307 ehci_writel(ehci, (u32) ehci->async->qh_dma, end_unlink_async()
1381 /* Find the last async QH which has been empty for a timer cycle */ unlink_empty_async()
1382 for (qh = ehci->async->qh_next.qh; qh; qh = qh->qh_next.qh) { unlink_empty_async()
1404 /* The root hub is suspended; unlink all the async QHs */ unlink_empty_async_suspended()
1409 while (ehci->async->qh_next.qh) { unlink_empty_async_suspended()
1410 qh = ehci->async->qh_next.qh; unlink_empty_async_suspended()
1417 /* makes sure the async qh will become idle */
1437 ehci->qh_scan_next = ehci->async->qh_next.qh; scan_async()
H A Dehci.h80 * ehci_hcd: async, unlink, periodic (and shadow), ...
108 EHCI_HRTIMER_POLL_ASS, /* Poll for async schedule off */
114 EHCI_HRTIMER_ASYNC_UNLINKS, /* Unlink empty async QHs */
117 EHCI_HRTIMER_DISABLE_ASYNC, /* Wait to disable async sched */
152 /* async schedule support */
153 struct ehci_qh *async; member in struct:ehci_hcd
158 unsigned async_count; /* async activity count */
351 /* next async queue entry, or pointer to interrupt/periodic QH */
354 /* for periodic/async schedules and qtd lists, mark end of list */
363 * For entries in the async schedule, the type tag always says "qh".
381 * These appear in both the async and (for interrupt) periodic schedules.
389 #define QH_HEAD (1 << 15) /* Head of async reclamation list */
H A Dsl811.h149 /* async schedule: control, bulk */
150 struct list_head async; member in struct:sl811
187 /* async schedule */
H A Doxu210hp-hcd.c235 MODULE_PARM_DESC(park, "park setting; 1-3 back-to-back async packets");
292 * async queue SHRINK often precedes IAA. while it's ready timer_action()
726 if (oxu->async) ehci_mem_cleanup()
727 qh_put(oxu->async); ehci_mem_cleanup()
728 oxu->async = NULL; ehci_mem_cleanup()
759 oxu->async = oxu_qh_alloc(oxu); ehci_mem_init()
760 if (!oxu->async) ehci_mem_init()
1284 qtd->hw_alt_next = oxu->async->hw_alt_next; qh_urb_transaction()
1494 /* Move qh (and its qtds) onto async queue; maybe enable queue.
1501 /* (re)start the async schedule? */ qh_link_async()
1502 head = oxu->async; qh_link_async()
1661 /* The async qh for the qtds being reclaimed are now unlinked from the HC */
1686 qh_put(qh); /* refcount from async list */ end_unlink_async()
1688 /* it's not free to turn the async schedule on/off; leave it end_unlink_async()
1692 && oxu->async->qh_next.qh == NULL) end_unlink_async()
1702 /* makes sure the async qh will become idle */
1717 /* stop async schedule right now? */ start_unlink_async()
1718 if (unlikely(qh == oxu->async)) { start_unlink_async()
1734 prev = oxu->async; start_unlink_async()
1766 qh = oxu->async->qh_next.qh; scan_async()
2407 (oxu->async->qh_next.ptr != NULL || ehci_work()
2581 /* stop async processing after it's idled a bit */ oxu_watchdog()
2583 start_unlink_async(oxu, oxu->async); oxu_watchdog()
2625 * dedicate a qh for the async ring head, since we couldn't unlink oxu_hcd_init()
2626 * a 'real' qh without stopping the async schedule [4.8]. use it oxu_hcd_init()
2631 oxu->async->qh_next.qh = NULL; oxu_hcd_init()
2632 oxu->async->hw_next = QH_NEXT(oxu->async->qh_dma); oxu_hcd_init()
2633 oxu->async->hw_info1 = cpu_to_le32(QH_HEAD); oxu_hcd_init()
2634 oxu->async->hw_token = cpu_to_le32(QTD_STS_HALT); oxu_hcd_init()
2635 oxu->async->hw_qtd_next = EHCI_LIST_END; oxu_hcd_init()
2636 oxu->async->qh_state = QH_STATE_LINKED; oxu_hcd_init()
2637 oxu->async->hw_alt_next = QTD_NEXT(oxu->async->dummy->qtd_dma); oxu_hcd_init()
2645 * NVidia and ALI silicon), maximizes throughput on the async oxu_hcd_init()
2721 writel((u32) oxu->async->qh_dma, &oxu->regs->async_next); oxu_run()
2788 if (oxu->async) oxu_stop()
3038 for (tmp = oxu->async->qh_next.qh; oxu_endpoint_disable()
3551 writel((u32) oxu->async->qh_dma, &oxu->regs->async_next); oxu_bus_resume()
3586 if (oxu->async->qh_next.qh) oxu_bus_resume()
H A Dohci-mem.c15 * - async and periodic schedules, shared by HC and HCD ... these
H A Dsl811-hcd.c298 * from the previous frame), and the rest of the time is async
315 else if (!list_empty(&sl811->async)) start()
316 ep = container_of(sl811->async.next, start()
330 if (ep->schedule.next == &sl811->async) start()
440 /* async deschedule? */
468 /* we might turn SOFs back on again for the async schedule */
743 if (sl811->periodic_count == 0 && list_empty(&sl811->async)) sl811h_irq()
895 list_add_tail(&ep->schedule, &sl811->async); sl811h_urb_enqueue()
1434 list_for_each_entry (ep, &sl811->async, schedule) { sl811h_show()
1457 if (!list_empty(&sl811->async)) sl811h_show()
1692 INIT_LIST_HEAD(&sl811->async); sl811h_probe()
H A Dehci-hcd.c98 MODULE_PARM_DESC (park, "park setting; 1-3 back-to-back async packets");
515 * dedicate a qh for the async ring head, since we couldn't unlink ehci_init()
516 * a 'real' qh without stopping the async schedule [4.8]. use it ehci_init()
521 ehci->async->qh_next.qh = NULL; ehci_init()
522 hw = ehci->async->hw; ehci_init()
523 hw->hw_next = QH_NEXT(ehci, ehci->async->qh_dma); ehci_init()
530 ehci->async->qh_state = QH_STATE_LINKED; ehci_init()
531 hw->hw_alt_next = QTD_NEXT(ehci, ehci->async->dummy->qtd_dma); ehci_init()
544 * NVidia and ALI silicon), maximizes throughput on the async ehci_init()
582 ehci_writel(ehci, (u32)ehci->async->qh_dma, &ehci->regs->async_next); ehci_run()
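
The ehci_init(), oxu_hcd_init() and (below) hcd_fotg210_init()/hcd_fusbh200_init() snippets all dedicate one permanently linked QH as the head of the circular async schedule, since a "real" QH cannot be unlinked without stopping the schedule (the comments cite EHCI spec section 4.8). A minimal user-space model of that sentinel-head ring; the struct and field names are illustrative stand-ins, not the driver's own types:

    #include <stdio.h>

    /* Toy stand-in for struct ehci_qh: a circular singly linked ring. */
    struct qh {
        struct qh *next;
        int id;                 /* 0 marks the dedicated sentinel head */
    };

    /* Link a new QH right behind the sentinel, as qh_link_async() does. */
    static void link_qh(struct qh *head, struct qh *qh)
    {
        qh->next = head->next;
        head->next = qh;
    }

    /* Unlink one QH; the ring stays valid because the sentinel remains. */
    static void unlink_qh(struct qh *head, struct qh *qh)
    {
        struct qh *prev = head;

        while (prev->next != qh)
            prev = prev->next;
        prev->next = qh->next;
    }

    int main(void)
    {
        struct qh head = { .next = &head, .id = 0 };  /* points at itself */
        struct qh a = { .id = 1 }, b = { .id = 2 };

        link_qh(&head, &a);
        link_qh(&head, &b);
        unlink_qh(&head, &a);          /* the schedule keeps running */

        for (struct qh *q = head.next; q != &head; q = q->next)
            printf("qh %d still linked\n", q->id);
        return 0;
    }
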
H A Dfotg210-hcd.c85 MODULE_PARM_DESC(park, "park setting; 1-3 back-to-back async packets");
399 == fotg210->async->hw->hw_alt_next) qh_lines()
429 if (td->hw_alt_next == fotg210->async->hw->hw_alt_next) qh_lines()
489 /* dumps a snapshot of the async schedule. fill_async_buffer()
494 for (qh = fotg210->async->qh_next.qh; size > 0 && qh; fill_async_buffer()
742 temp = scnprintf(next, size, "async unlink qh %p\n", fill_registers_buffer()
879 if (!debugfs_create_file("async", S_IRUGO, fotg210->debug_dir, bus, create_debug_files()
1142 fotg210_dbg(fotg210, "Waited too long for the async schedule status (%x/%x), giving up\n", fotg210_poll_ASS()
1163 /* Turn off the async schedule after a brief delay */ fotg210_disable_ASE()
1873 * - async and periodic schedules, shared by HC and HCD ... these
1970 if (fotg210->async) fotg210_mem_cleanup()
1971 qh_destroy(fotg210, fotg210->async); fotg210_mem_cleanup()
1972 fotg210->async = NULL; fotg210_mem_cleanup()
2026 fotg210->async = fotg210_qh_alloc(fotg210, flags); fotg210_mem_init()
2027 if (!fotg210->async) fotg210_mem_init()
2212 /* If an async split transaction gets an error or is unlinked, fotg210_clear_tt_buffer()
2501 * async transaction in the TT buffer. qh_completions()
2743 qtd->hw_alt_next = fotg210->async->hw->hw_alt_next; qh_urb_transaction()
3015 /* Stop waiting to turn off the async schedule */ enable_async()
3028 /* The async schedule and async_unlink list are supposed to be empty */ disable_async()
3029 WARN_ON(fotg210->async->qh_next.qh || fotg210->async_unlink); disable_async()
3035 /* move qh (and its qtds) onto async queue; maybe enable queue. */
3052 head = fotg210->async; qh_link_async()
3226 prev = fotg210->async; single_unlink_async()
3267 /* the async qh for the qtds being unlinked are now gone from the HC */
3306 /* Unlink all the async QHs that have been empty for a timer cycle */ unlink_empty_async()
3307 next = fotg210->async->qh_next.qh; unlink_empty_async()
3334 /* makes sure the async qh will become idle */
3362 fotg210->qh_scan_next = fotg210->async->qh_next.qh; scan_async()
5191 * dedicate a qh for the async ring head, since we couldn't unlink hcd_fotg210_init()
5192 * a 'real' qh without stopping the async schedule [4.8]. use it hcd_fotg210_init()
5197 fotg210->async->qh_next.qh = NULL; hcd_fotg210_init()
5198 hw = fotg210->async->hw; hcd_fotg210_init()
5199 hw->hw_next = QH_NEXT(fotg210, fotg210->async->qh_dma); hcd_fotg210_init()
5203 fotg210->async->qh_state = QH_STATE_LINKED; hcd_fotg210_init()
5204 hw->hw_alt_next = QTD_NEXT(fotg210, fotg210->async->dummy->qtd_dma); hcd_fotg210_init()
5212 * NVidia and ALI silicon), maximizes throughput on the async hcd_fotg210_init()
5251 fotg210_writel(fotg210, (u32)fotg210->async->qh_dma, fotg210_run()
5645 for (tmp = fotg210->async->qh_next.qh; fotg210_endpoint_disable()
H A Dfusbh200-hcd.c84 MODULE_PARM_DESC (park, "park setting; 1-3 back-to-back async packets");
387 == fusbh200->async->hw->hw_alt_next) qh_lines()
417 if (td->hw_alt_next == fusbh200->async->hw->hw_alt_next) qh_lines()
469 /* dumps a snapshot of the async schedule. fill_async_buffer()
474 for (qh = fusbh200->async->qh_next.qh; size > 0 && qh; qh = qh->qh_next.qh) fill_async_buffer()
715 temp = scnprintf(next, size, "async unlink qh %p\n", fill_registers_buffer()
849 if (!debugfs_create_file("async", S_IRUGO, fusbh200->debug_dir, bus, create_debug_files()
1108 fusbh200_dbg(fusbh200, "Waited too long for the async schedule status (%x/%x), giving up\n", fusbh200_poll_ASS()
1128 /* Turn off the async schedule after a brief delay */ fusbh200_disable_ASE()
1825 * - async and periodic schedules, shared by HC and HCD ... these
1920 if (fusbh200->async) fusbh200_mem_cleanup()
1921 qh_destroy(fusbh200, fusbh200->async); fusbh200_mem_cleanup()
1922 fusbh200->async = NULL; fusbh200_mem_cleanup()
1977 fusbh200->async = fusbh200_qh_alloc (fusbh200, flags); fusbh200_mem_init()
1978 if (!fusbh200->async) { fusbh200_mem_init()
2163 /* If an async split transaction gets an error or is unlinked, fusbh200_clear_tt_buffer()
2451 * async transaction in the TT buffer. qh_completions()
2692 qtd->hw_alt_next = fusbh200->async->hw->hw_alt_next; qh_urb_transaction()
2962 /* Stop waiting to turn off the async schedule */ enable_async()
2975 /* The async schedule and async_unlink list are supposed to be empty */ disable_async()
2976 WARN_ON(fusbh200->async->qh_next.qh || fusbh200->async_unlink); disable_async()
2982 /* move qh (and its qtds) onto async queue; maybe enable queue. */
2999 head = fusbh200->async; qh_link_async()
3173 prev = fusbh200->async; single_unlink_async()
3213 /* the async qh for the qtds being unlinked are now gone from the HC */
3252 /* Unlink all the async QHs that have been empty for a timer cycle */ unlink_empty_async()
3253 next = fusbh200->async->qh_next.qh; unlink_empty_async()
3279 /* makes sure the async qh will become idle */
3306 fusbh200->qh_scan_next = fusbh200->async->qh_next.qh; scan_async()
5115 * dedicate a qh for the async ring head, since we couldn't unlink hcd_fusbh200_init()
5116 * a 'real' qh without stopping the async schedule [4.8]. use it hcd_fusbh200_init()
5121 fusbh200->async->qh_next.qh = NULL; hcd_fusbh200_init()
5122 hw = fusbh200->async->hw; hcd_fusbh200_init()
5123 hw->hw_next = QH_NEXT(fusbh200, fusbh200->async->qh_dma); hcd_fusbh200_init()
5127 fusbh200->async->qh_state = QH_STATE_LINKED; hcd_fusbh200_init()
5128 hw->hw_alt_next = QTD_NEXT(fusbh200, fusbh200->async->dummy->qtd_dma); hcd_fusbh200_init()
5136 * NVidia and ALI silicon), maximizes throughput on the async hcd_fusbh200_init()
5174 fusbh200_writel(fusbh200, (u32)fusbh200->async->qh_dma, &fusbh200->regs->async_next); fusbh200_run()
5553 for (tmp = fusbh200->async->qh_next.qh; fusbh200_endpoint_disable()
H A Disp116x.h274 /* async schedule: control, bulk */
275 struct list_head async; member in struct:isp116x
322 /* async schedule */
H A Disp116x-hcd.c315 /* async deschedule */
515 list_for_each_entry(ep, &isp116x->async, schedule) { start_atl_transfers()
563 if ((&isp116x->async)->next != (&isp116x->async)->prev) start_atl_transfers()
564 list_move(&isp116x->async, (&isp116x->async)->next); start_atl_transfers()
771 list_add_tail(&ep->schedule, &isp116x->async); isp116x_urb_enqueue()
1630 INIT_LIST_HEAD(&isp116x->async); isp116x_probe()
H A Dehci-dbg.c430 == ehci->async->hw->hw_alt_next) qh_lines()
461 if (td->hw_alt_next == ehci->async->hw->hw_alt_next) qh_lines()
513 /* dumps a snapshot of the async schedule. fill_async_buffer()
518 for (qh = ehci->async->qh_next.qh; size > 0 && qh; qh = qh->qh_next.qh) fill_async_buffer()
911 temp = scnprintf(next, size, "async unlink qh %p\n", fill_registers_buffer()
1057 if (!debugfs_create_file("async", S_IRUGO, ehci->debug_dir, bus, create_debug_files()
/linux-4.1.27/drivers/net/ethernet/sfc/
H A Dmcdi.c373 struct efx_mcdi_async_param *async; efx_mcdi_release() local
378 async = list_first_entry_or_null( efx_mcdi_release()
380 if (async) { efx_mcdi_release()
382 efx_mcdi_send_request(efx, async->cmd, efx_mcdi_release()
383 (const efx_dword_t *)(async + 1), efx_mcdi_release()
384 async->inlen); efx_mcdi_release()
390 if (async) efx_mcdi_release()
406 struct efx_mcdi_async_param *async; efx_mcdi_complete_async() local
442 async = list_first_entry(&mcdi->async_list, efx_mcdi_complete_async()
444 list_del(&async->list); efx_mcdi_complete_async()
447 outbuf = (efx_dword_t *)(async + 1); efx_mcdi_complete_async()
449 min(async->outlen, data_len)); efx_mcdi_complete_async()
450 if (!timeout && rc && !async->quiet) { efx_mcdi_complete_async()
454 efx_mcdi_display_error(efx, async->cmd, async->inlen, errbuf, efx_mcdi_complete_async()
457 async->complete(efx, async->cookie, rc, outbuf, data_len); efx_mcdi_complete_async()
458 kfree(async); efx_mcdi_complete_async()
687 struct efx_mcdi_async_param *async; _efx_mcdi_rpc_async() local
697 async = kmalloc(sizeof(*async) + ALIGN(max(inlen, outlen), 4), _efx_mcdi_rpc_async()
699 if (!async) _efx_mcdi_rpc_async()
702 async->cmd = cmd; _efx_mcdi_rpc_async()
703 async->inlen = inlen; _efx_mcdi_rpc_async()
704 async->outlen = outlen; _efx_mcdi_rpc_async()
705 async->quiet = quiet; _efx_mcdi_rpc_async()
706 async->complete = complete; _efx_mcdi_rpc_async()
707 async->cookie = cookie; _efx_mcdi_rpc_async()
708 memcpy(async + 1, inbuf, inlen); _efx_mcdi_rpc_async()
713 list_add_tail(&async->list, &mcdi->async_list); _efx_mcdi_rpc_async()
718 if (mcdi->async_list.next == &async->list && _efx_mcdi_rpc_async()
725 kfree(async); _efx_mcdi_rpc_async()
840 struct efx_mcdi_async_param *async, *next; efx_mcdi_flush_async() local
862 /* Nothing else will access the async list now, so it is safe efx_mcdi_flush_async()
867 list_for_each_entry_safe(async, next, &mcdi->async_list, list) { efx_mcdi_flush_async()
868 async->complete(efx, async->cookie, -ENETDOWN, NULL, 0); efx_mcdi_flush_async()
869 list_del(&async->list); efx_mcdi_flush_async()
870 kfree(async); efx_mcdi_flush_async()
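
_efx_mcdi_rpc_async() above allocates a single block holding the efx_mcdi_async_param header plus a payload sized ALIGN(max(inlen, outlen), 4), then addresses the payload as (async + 1). A small runnable sketch of that header-plus-trailing-buffer idiom, with hypothetical names in place of the sfc driver's:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Hypothetical stand-in for struct efx_mcdi_async_param. */
    struct async_param {
        unsigned cmd;
        size_t inlen;
        size_t outlen;
        /* request/response payload lives immediately after the header */
    };

    static struct async_param *param_alloc(unsigned cmd, const void *inbuf,
                                           size_t inlen, size_t outlen)
    {
        size_t payload = inlen > outlen ? inlen : outlen;
        struct async_param *p = malloc(sizeof(*p) + payload);

        if (!p)
            return NULL;
        p->cmd = cmd;
        p->inlen = inlen;
        p->outlen = outlen;
        memcpy(p + 1, inbuf, inlen);  /* same trick as memcpy(async + 1, ...) */
        return p;
    }

    int main(void)
    {
        const char req[] = "GET_VERSION";
        struct async_param *p = param_alloc(42, req, sizeof(req), 64);

        if (p) {
            printf("cmd %u payload \"%s\"\n", p->cmd, (char *)(p + 1));
            free(p);
        }
        return 0;
    }
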
/linux-4.1.27/drivers/isdn/hysdn/
H A Dhysdn_sched.c84 /* first of all async requests are handled */ hysdn_sched_tx()
94 } /* async request */ hysdn_sched_tx()
156 hysdn_addlog(card, "async tx-cfg chan=%d len=%d", chan, strlen(line) + 1); hysdn_tx_cfgline()
161 hysdn_addlog(card, "async tx-cfg delayed"); hysdn_tx_cfgline()
179 hysdn_addlog(card, "async tx-cfg data queued"); hysdn_tx_cfgline()
186 hysdn_addlog(card, "async tx-cfg waiting for tx-ready"); hysdn_tx_cfgline()
194 hysdn_addlog(card, "async tx-cfg data send"); hysdn_tx_cfgline()
H A Dhysdn_defs.h85 #define LOG_SCHED_ASYN 0x00001000 /* debug schedulers async tx routines */
178 /* scheduler for data transfer (only async parts) */
179 unsigned char async_data[256];/* async data to be sent (normally for config) */
181 unsigned short volatile async_channel;/* channel number for async transfer */
/linux-4.1.27/include/linux/usb/
H A Dehci_def.h58 #define HCC_CANPARK(p) ((p)&(1 << 2)) /* true: can park on async qh */
75 #define CMD_ASPE (1<<13) /* async schedule prefetch enable */
78 #define CMD_PARK (1<<11) /* enable "park" on async qh */
81 #define CMD_IAAD (1<<6) /* "doorbell" interrupt async advance */
82 #define CMD_ASE (1<<5) /* async schedule enable */
97 #define STS_IAA (1<<5) /* Interrupted on async advance */
114 u32 async_next; /* address of next async queue head */
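
The CMD_IAAD/STS_IAA pair above implements the "interrupt on async advance" doorbell: after software unlinks a QH it sets CMD_IAAD, and only once the controller raises STS_IAA is the QH guaranteed to be out of the hardware's cached schedule. A hedged user-space sketch of that handshake against a simulated register pair (the real drivers use ehci_writel()/ehci_readl() on memory-mapped registers, and USBSTS bits are write-one-to-clear):

    #include <stdio.h>
    #include <stdint.h>

    #define CMD_IAAD (1 << 6)   /* "doorbell" interrupt async advance */
    #define STS_IAA  (1 << 5)   /* interrupted on async advance */

    /* Simulated registers; a real HC sets STS_IAA asynchronously. */
    static uint32_t cmd, sts;

    static void hc_simulate_advance(void)
    {
        if (cmd & CMD_IAAD) {       /* controller saw the doorbell */
            cmd &= ~CMD_IAAD;       /* hardware clears the doorbell bit */
            sts |= STS_IAA;         /* ...and latches the status bit */
        }
    }

    int main(void)
    {
        cmd |= CMD_IAAD;            /* ring the doorbell after an unlink */
        hc_simulate_advance();

        if (sts & STS_IAA) {
            sts &= ~STS_IAA;        /* ack; write-one-to-clear in hardware */
            printf("async advance: unlinked QH is safe to free\n");
        }
        return 0;
    }
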
H A Dmsm_hsusb_hw.h53 #define ASYNC_INTR_CTRL (1 << 29) /* Enable async interrupt */
/linux-4.1.27/drivers/base/power/
H A Dmain.c29 #include <linux/async.h>
222 * @async: If unset, wait only if the device's power.async_suspend flag is set.
224 static void dpm_wait(struct device *dev, bool async) dpm_wait() argument
229 if (async || (pm_async_enabled && dev->power.async_suspend)) dpm_wait()
239 static void dpm_wait_for_children(struct device *dev, bool async) dpm_wait_for_children() argument
241 device_for_each_child(dev, &async, dpm_wait_fn); dpm_wait_for_children()
468 * @async: If true, the device is being resumed asynchronously.
473 static int device_resume_noirq(struct device *dev, pm_message_t state, bool async) device_resume_noirq() argument
488 dpm_wait(dev->parent, async); device_resume_noirq()
531 pm_dev_err(dev, pm_transition, " async", error); async_resume_noirq()
553 * Advance the async threads upfront, dpm_resume_noirq()
554 * in case the starting of async threads is dpm_resume_noirq()
555 * delayed by non-async resuming devices. dpm_resume_noirq()
598 * @async: If true, the device is being resumed asynchronously.
602 static int device_resume_early(struct device *dev, pm_message_t state, bool async) device_resume_early() argument
617 dpm_wait(dev->parent, async); device_resume_early()
656 pm_dev_err(dev, pm_transition, " async", error); async_resume_early()
675 * Advance the async threads upfront, dpm_resume_early()
676 * in case the starting of async threads is dpm_resume_early()
677 * delayed by non-async resuming devices. dpm_resume_early()
728 * @async: If true, the device is being resumed asynchronously.
730 static int device_resume(struct device *dev, pm_message_t state, bool async) device_resume() argument
749 dpm_wait(dev->parent, async); device_resume()
826 pm_dev_err(dev, pm_transition, " async", error); async_resume()
1009 * @async: If true, the device is being suspended asynchronously.
1014 static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async) __device_suspend_noirq() argument
1034 dpm_wait_for_children(dev, async); __device_suspend_noirq()
1075 pm_dev_err(dev, pm_transition, " async", error); async_suspend_noirq()
1154 * @async: If true, the device is being suspended asynchronously.
1158 static int __device_suspend_late(struct device *dev, pm_message_t state, bool async) __device_suspend_late() argument
1180 dpm_wait_for_children(dev, async); __device_suspend_late()
1221 pm_dev_err(dev, pm_transition, " async", error); async_suspend_late()
1341 * @async: If true, the device is being suspended asynchronously.
1343 static int __device_suspend(struct device *dev, pm_message_t state, bool async) __device_suspend() argument
1353 dpm_wait_for_children(dev, async); __device_suspend()
1471 pm_dev_err(dev, pm_transition, " async", error); async_suspend()
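
device_resume() and friends above call dpm_wait(dev->parent, async) so an asynchronously resumed child still blocks until its parent's resume has completed; the PM core implements this with a per-device completion. A hedged pthreads analogue of that completion-based ordering (a user-space stand-in, not the PM core's code):

    #include <pthread.h>
    #include <stdio.h>

    /* A minimal 'completion': flag + mutex + condvar. */
    struct completion {
        pthread_mutex_t lock;
        pthread_cond_t cond;
        int done;
    };

    static void complete(struct completion *c)
    {
        pthread_mutex_lock(&c->lock);
        c->done = 1;
        pthread_cond_broadcast(&c->cond);
        pthread_mutex_unlock(&c->lock);
    }

    static void wait_for_completion(struct completion *c)
    {
        pthread_mutex_lock(&c->lock);
        while (!c->done)
            pthread_cond_wait(&c->cond, &c->lock);
        pthread_mutex_unlock(&c->lock);
    }

    static struct completion parent_done = {
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0
    };

    static void *resume_child(void *arg)
    {
        (void)arg;
        wait_for_completion(&parent_done);   /* dpm_wait(dev->parent, ...) */
        printf("child resumed after parent\n");
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        pthread_create(&t, NULL, resume_child, NULL); /* async thread upfront */
        printf("parent resuming\n");
        complete(&parent_done);                       /* complete_all() analogue */
        pthread_join(t, NULL);
        return 0;
    }
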
H A Dsysfs.c62 * async - Report/change current async suspend setting for the device
69 * All devices have one of the following two values for power/async:
589 static DEVICE_ATTR(async, 0644, async_show, async_store);
/linux-4.1.27/drivers/mtd/maps/
H A Dbfin-async-flash.c2 * drivers/mtd/maps/bfin-async-flash.c
5 * mapped onto the same async bank. The BF533-STAMP does this
35 #define DRIVER_NAME "bfin-async-flash"
196 MODULE_DESCRIPTION("MTD map driver for Blackfins with flash/ethernet on same async bank");
/linux-4.1.27/drivers/net/wan/
H A Dx25_asy.h10 /* X25 async protocol characters. */
H A Dx25_asy.c6 * o sync/async switching ?
10 * to do CCITT then in theory all you need is to nick the HDLC async
576 /* Perform the low-level X.25 async init */ x25_asy_open_tty()
786 pr_info("X.25 async: version 0.00 ALPHA (dynamic channels, max=%d)\n", init_x25_asy()
/linux-4.1.27/drivers/media/v4l2-core/
H A DMakefile9 v4l2-async.o
/linux-4.1.27/drivers/scsi/
H A Datp870u.h44 unsigned short async[2]; member in struct:atp_unit
H A Dscsi_pm.c10 #include <linux/async.h>
146 * If a user has disabled async probing a likely reason scsi_bus_resume_common()
151 if (strncmp(scsi_scan_type, "async", 5) != 0) scsi_bus_resume_common()
168 /* Wait until async scanning is finished */ scsi_bus_prepare()
H A D53c700.c32 * The 700 is the lowliest of the line, it can only do async SCSI.
708 /* do the best we can, but the async clock will be out NCR_700_chip_setup()
709 * of spec: sync divider 2, async divider 3 */ NCR_700_chip_setup()
710 DEBUG(("53c700: sync 2 async 3\n")); NCR_700_chip_setup()
715 /* sync divider 1.5, async divider 3 */ NCR_700_chip_setup()
716 DEBUG(("53c700: sync 1.5 async 3\n")); NCR_700_chip_setup()
723 /* sync divider 1, async divider 2 */ NCR_700_chip_setup()
724 DEBUG(("53c700: sync 1 async 2\n")); NCR_700_chip_setup()
729 /* sync divider 1, async divider 1.5 */ NCR_700_chip_setup()
730 DEBUG(("53c700: sync 1 async 1.5\n")); NCR_700_chip_setup()
735 DEBUG(("53c700: sync 1 async 1\n")); NCR_700_chip_setup()
738 /* sync divider 1, async divider 1 */ NCR_700_chip_setup()
2022 /* if we're currently async, make sure the period is reasonable */ NCR_700_set_offset()
H A Dgdth.c3139 TRACE2(("gdth_interrupt() async. event\n")); __gdth_interrupt()
3433 "GDT HA %u, service %u, async. status %u/%lu unknown",
3435 "GDT HA %u, service %u, async. status %u/%lu unknown",
3583 "GDT HA %u, async. status 75 unknown",
3628 ha->dvr.eu.async.ionode = ha->hanum; gdth_async_event()
3629 ha->dvr.eu.async.status = ha->status; gdth_async_event()
3632 ha->dvr.size = sizeof(ha->dvr.eu.async); gdth_async_event()
3633 ha->dvr.eu.async.ionode = ha->hanum; gdth_async_event()
3634 ha->dvr.eu.async.service = ha->service; gdth_async_event()
3635 ha->dvr.eu.async.status = ha->status; gdth_async_event()
3636 ha->dvr.eu.async.info = ha->info; gdth_async_event()
3637 *(u32 *)ha->dvr.eu.async.scsi_coord = ha->info2; gdth_async_event()
3661 printk("Adapter %d: %s\n",dvr->eu.async.ionode,dvr->event_string); gdth_log_event()
3664 dvr->eu.async.ionode,dvr->event_string); gdth_log_event()
3666 } else if (dvr->eu.async.service == CACHESERVICE && gdth_log_event()
3667 INDEX_OK(dvr->eu.async.status, async_cache_tab)) { gdth_log_event()
3669 dvr->eu.async.status)); gdth_log_event()
3671 f = async_cache_tab[dvr->eu.async.status]; gdth_log_event()
3699 printk("GDT HA %u, Unknown async. event service %d event no. %d\n", gdth_log_event()
3700 dvr->eu.async.ionode,dvr->eu.async.service,dvr->eu.async.status); gdth_log_event()
3702 sprintf(buffer,"GDT HA %u, Unknown async. event service %d event no. %d", gdth_log_event()
3703 dvr->eu.async.ionode,dvr->eu.async.service,dvr->eu.async.status); gdth_log_event()
4077 evt.event.event_data.size=sizeof(evt.event.event_data.eu.async); ioc_event()
H A Dmesh.h115 #define ASYNC_PARAMS 2 /* sync_params value for async xfers */
H A Dscsi_priv.h5 #include <linux/async.h>
H A Dscsi_scan.c35 #include <linux/async.h>
92 #define SCSI_SCAN_TYPE_DEFAULT "async"
100 MODULE_PARM_DESC(scan, "sync, async or none");
135 * sleep a little. Even if we never get memory, the async scsi_complete_async_scans()
751 * @async: 1 if this device is being scanned asynchronously
762 int *bflags, int async) scsi_add_lun()
994 if (!async && scsi_sysfs_add_sdev(sdev) != 0) scsi_add_lun()
1758 * scsi_prep_async_scan - prepare for an async scan
1903 /* register with the async subsystem so wait_for_device_probe() scsi_scan_host()
761 scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result, int *bflags, int async) scsi_add_lun() argument
H A Dnsp32.c1009 * Set async because we don't get proper negotiation. nsp32_queuecommand_lck()
1016 "SDTR_INITIATOR: fall back to async"); nsp32_queuecommand_lck()
1020 * but there are no chance to remove this flag. Set async nsp32_queuecommand_lck()
1028 "Unknown SDTR from target is reached, fall back to async."); nsp32_queuecommand_lck()
1494 seq_puts(m, "async"); nsp32_show_info()
2054 * failed. Fall back to async transfer mode, and set nsp32_msgin_occur()
2279 goto async; nsp32_analyze_sdtr()
2285 * fall back to async mode. If it's ok, then investigate nsp32_analyze_sdtr()
2290 * to async mode. nsp32_analyze_sdtr()
2353 async: nsp32_analyze_sdtr()
2357 nsp32_dbg(NSP32_DEBUG_MSGINOCCUR, "exit: set async"); nsp32_analyze_sdtr()
2409 nsp32_dbg(NSP32_DEBUG_SYNC, "set async"); nsp32_set_async()
/linux-4.1.27/drivers/staging/lustre/lustre/ptlrpc/
H A Dptlrpcd.c176 * guarantee the async RPC can be processed ASAP, we have ptlrpcd_add_rqset()
354 CDEBUG(D_RPCTRACE, "transfer %d async RPCs [%d->%d]\n", ptlrpcd_check()
452 /* XXX: We want multiple CPU cores to share the async RPC load. So we start many
458 * For example: "ls -l", some async RPCs for statahead are assigned to
462 * such case, the statahead async RPCs can not be processed in time, it is
472 * and the async RPC load within the partners are shared.
475 * thread can be scheduled in time), and try to guarantee the async RPC
754 * non-recovery async RPC to improve overall async RPC efficiency. ptlrpcd_init()
756 * But there are some issues with async I/O RPCs and async non-I/O ptlrpcd_init()
758 * be blocked by some async I/O RPC(s), then will cause other async ptlrpcd_init()
761 * Maybe we should distinguish blocked async RPCs from non-blocked ptlrpcd_init()
762 * async RPCs, and process them in different ptlrpcd sets to avoid ptlrpcd_init()
763 * unnecessary dependency. But how to distribute async RPCs load ptlrpcd_init()
H A Drecover.c308 int ptlrpc_recover_import(struct obd_import *imp, char *new_uuid, int async) ptlrpc_recover_import() argument
347 if (!async) { ptlrpc_recover_import()
/linux-4.1.27/include/uapi/linux/
H A Dhysdn_if.h29 #define ERR_ASYNC_TIME 1011 /* timeout sending async data */
H A Dppp-ioctl.h87 #define PPPIOCGASYNCMAP _IOR('t', 88, int) /* get async map */
88 #define PPPIOCSASYNCMAP _IOW('t', 87, int) /* set async map */
90 #define PPPIOCGRASYNCMAP _IOR('t', 85, int) /* get receive async map */
91 #define PPPIOCSRASYNCMAP _IOW('t', 84, int) /* set receive async map */
H A Dtty.h17 #define N_X25 6 /* X.25 async */
H A Dprctl.h42 # define PR_FP_EXC_NONRECOV 1 /* async non-recoverable exc. mode */
43 # define PR_FP_EXC_ASYNC 2 /* async recoverable exception mode */
/linux-4.1.27/arch/arm/mach-omap2/
H A Dusb-tusb6010.c120 printk(error, "async", status); tusb6010_platform_retime()
165 unsigned async, unsigned sync, tusb6010_setup_interface()
173 status = gpmc_cs_request(async, SZ_16M, (unsigned long *) tusb6010_setup_interface()
181 async_cs = async; tusb6010_setup_interface()
163 tusb6010_setup_interface(struct musb_hdrc_platform_data *data, unsigned ps_refclk, unsigned waitpin, unsigned async, unsigned sync, unsigned irq, unsigned dmachan) tusb6010_setup_interface() argument
/linux-4.1.27/net/ipv6/
H A Dxfrm6_input.c32 int xfrm6_transport_finish(struct sk_buff *skb, int async) xfrm6_transport_finish() argument
38 if (!async) xfrm6_transport_finish()
/linux-4.1.27/drivers/misc/sgi-gru/
H A Dgrukservices.h171 * async completions
180 * Release async resources previously reserved.
188 * Wait for async GRU instructions to complete.
196 * Lock previous reserved async GRU resources
207 * Unlock previous reserved async GRU resources
H A Dgrukservices.c93 * GRU instructions. When an async GRU instruction completes, the
295 * (0 = async resources already reserved)
327 * Release async resources previously reserved.
344 * Wait for async GRU instructions to complete.
358 * Lock previous reserved async GRU resources
381 * Unlock previous reserved async GRU resources
H A Dgrutables.h466 /* ---- the following are used for managing kernel async GRU CBRs --- */
467 int bs_async_dsr_bytes; /* DSRs for async */
468 int bs_async_cbrs; /* CBRs AU for async */
/linux-4.1.27/net/xfrm/
H A Dxfrm_input.c194 int async = 0; xfrm_input() local
196 /* A negative encap_type indicates async resumption. */ xfrm_input()
198 async = 1; xfrm_input()
315 if (async && x->repl->recheck(x, skb, seq)) { xfrm_input()
372 return x->inner_mode->afinfo->transport_finish(skb, async); xfrm_input()
/linux-4.1.27/arch/tile/mm/
H A Dfault.c806 struct async_tlb *async; do_page_fault() local
813 async = &current->thread.dma_async_tlb; do_page_fault()
817 async = NULL; do_page_fault()
819 if (async) { do_page_fault()
828 if (async->fault_num != 0) { do_page_fault()
829 panic("Second async fault %d; old fault was %d (%#lx/%ld)", do_page_fault()
830 fault_num, async->fault_num, do_page_fault()
834 async->fault_num = fault_num; do_page_fault()
835 async->is_fault = is_page_fault; do_page_fault()
836 async->is_write = write; do_page_fault()
837 async->address = address; do_page_fault()
857 struct async_tlb *async = &current->thread.dma_async_tlb; do_async_page_fault() local
866 if (async->fault_num) { do_async_page_fault()
868 * Clear async->fault_num before calling the page-fault do_async_page_fault()
873 int fault_num = async->fault_num; do_async_page_fault()
874 async->fault_num = 0; do_async_page_fault()
875 handle_page_fault(regs, fault_num, async->is_fault, do_async_page_fault()
876 async->address, async->is_write); do_async_page_fault()
/linux-4.1.27/arch/blackfin/kernel/cplb-nompu/
H A Dcplbinit.c150 /* Addressing hole up to the async bank. */ generate_cplb_tables_all()
180 /* Normally this hole is caught by the async below. */ generate_cplb_tables_all()
189 /* Addressing hole up to the async bank. */ generate_cplb_tables_all()
/linux-4.1.27/drivers/usb/core/
H A Ddevio.c31 * 30.09.2005 0.3 Fix user-triggerable oops in async URB delivery
69 spinlock_t lock; /* protects the async urb lists */
82 struct async { struct
271 * async list handling
274 static struct async *alloc_async(unsigned int numisoframes) alloc_async()
276 struct async *as; alloc_async()
278 as = kzalloc(sizeof(struct async), GFP_KERNEL); alloc_async()
289 static void free_async(struct async *as) free_async()
308 static void async_newpending(struct async *as) async_newpending()
318 static void async_removepending(struct async *as) async_removepending()
328 static struct async *async_getcompleted(struct usb_dev_state *ps) async_getcompleted()
331 struct async *as = NULL; async_getcompleted()
335 as = list_entry(ps->async_completed.next, struct async, async_getcompleted()
343 static struct async *async_getpending(struct usb_dev_state *ps, async_getpending()
346 struct async *as; async_getpending()
456 struct async *as;
491 struct async *as = urb->context; async_completed()
536 struct async *as; destroy_async()
541 as = list_entry(list->next, struct async, asynclist); destroy_async()
564 if (ifnum == list_entry(p, struct async, asynclist)->ifnum) destroy_async_on_interface()
607 /* force async requests to complete */ driver_disconnect()
941 struct async *as; usbdev_release()
1290 struct async *as = NULL; proc_do_submiturb()
1439 u += sizeof(struct async) + sizeof(struct urb) + uurb->buffer_length + proc_do_submiturb()
1626 struct async *as; proc_unlinkurb()
1646 static int processcompl(struct async *as, void __user * __user *arg) processcompl()
1683 static struct async *reap_as(struct usb_dev_state *ps) reap_as()
1686 struct async *as = NULL; reap_as()
1708 struct async *as = reap_as(ps); proc_reapurb()
1722 struct async *as; proc_reapurbnonblock()
1815 static int processcompl_compat(struct async *as, void __user * __user *arg) processcompl_compat()
1851 struct async *as = reap_as(ps); proc_reapurb_compat()
1865 struct async *as; proc_reapurbnonblock_compat()
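
devio.c above is the kernel side of usbfs's async URB machinery: proc_do_submiturb() queues a struct async on the pending list, async_completed() moves it to the completed list, and proc_reapurb() hands it back to user space. A hedged user-space sketch of the matching ioctl sequence (the device path and endpoint are placeholders; a real program would also claim the interface with USBDEVFS_CLAIMINTERFACE, and error handling is trimmed):

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/usbdevice_fs.h>

    int main(void)
    {
        /* Placeholder path: bus 001, device 002. */
        int fd = open("/dev/bus/usb/001/002", O_RDWR);
        static char buf[64];
        struct usbdevfs_urb urb, *reaped;

        if (fd < 0)
            return 1;

        memset(&urb, 0, sizeof(urb));
        urb.type = USBDEVFS_URB_TYPE_BULK;
        urb.endpoint = 0x81;           /* placeholder: bulk IN endpoint 1 */
        urb.buffer = buf;
        urb.buffer_length = sizeof(buf);

        ioctl(fd, USBDEVFS_SUBMITURB, &urb);  /* async: returns immediately */
        /* ... do other work while the transfer is in flight ... */
        ioctl(fd, USBDEVFS_REAPURB, &reaped); /* block until one URB completes */

        printf("status %d, %d bytes\n", reaped->status, reaped->actual_length);
        return 0;
    }
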
/linux-4.1.27/kernel/
H A Dasync.c2 * async.c: Asynchronous function calls for boot performance
32 The async core will assign each scheduled event such a sequence cookie and
51 #include <linux/async.h>
190 /* mark that this task has queued an async job, used by module init */ __async_schedule()
319 * current_is_async - is %current an async worker task?
321 * Returns %true if %current is an async worker task.
H A Dworkqueue_internal.h20 * Only to be used in workqueue and async.
/linux-4.1.27/drivers/media/platform/xilinx/
H A Dxilinx-vipp.h21 #include <media/v4l2-async.h>
/linux-4.1.27/drivers/net/fddi/skfp/h/
H A Dfddi.h51 #define FC_ASYNC_LLC 0x50 /* async. LLC frame */
H A Dfplustm.h148 u_short tx_a0_start ; /* async queue A0 start address */
149 u_short tx_a0_size ; /* async queue A0 size */
/linux-4.1.27/drivers/gpu/drm/i915/
H A Dintel_atomic.c103 * @async: asynchronous commit
117 bool async) intel_atomic_commit()
122 if (async) { intel_atomic_commit()
123 DRM_DEBUG_KMS("i915 does not yet support async commit\n"); intel_atomic_commit()
115 intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state, bool async) intel_atomic_commit() argument
/linux-4.1.27/drivers/dma/ppc4xx/
H A Dadma.h88 * @pdest_page: P destination page for async validate operation
89 * @qdest_page: Q destination page for async validate operation
90 * @pdest: P dma addr for async validate operation
91 * @qdest: Q dma addr for async validate operation
/linux-4.1.27/include/xen/interface/io/
H A Dxs_wire.h86 char rsp[XENSTORE_RING_SIZE]; /* Replies and async watch events. */
/linux-4.1.27/arch/mn10300/include/asm/
H A Dintctl-regs.h42 #define NMICR_ABUSERR 0x0008 /* async bus error flag */
/linux-4.1.27/arch/c6x/platforms/
H A Demif.c79 err = of_property_read_u32_array(node, "ti,emifa-async-wait-control", &val, 1); c6x_emifa_init()
/linux-4.1.27/include/linux/
H A Dasync.h2 * async.h: Asynchronous function calls for boot performance
H A Ddm-io.h67 * For async io calls, users can alternatively use the dm_io() function below
H A Dcompaction.h25 /* Zone lock or lru_lock was contended in async compaction */
H A Dblk-mq.h222 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
223 void blk_mq_run_hw_queues(struct request_queue *q, bool async);
/linux-4.1.27/drivers/gpu/drm/radeon/
H A Dr600_dma.c93 * r600_dma_stop - stop the async dma engine
97 * Stop the async dma engine (r6xx-evergreen).
113 * r600_dma_resume - setup and start the async dma engine
187 * r600_dma_fini - tear down the async dma engine
191 * Stop the async dma engine and free the ring (r6xx-evergreen).
205 * Check if the async DMA engine is locked up.
221 * r600_dma_ring_test - simple async dma engine test
H A Dcik_sdma.c244 * cik_sdma_gfx_stop - stop the gfx async dma engines
248 * Stop the gfx async dma ring buffers (CIK).
285 * cik_sdma_rlc_stop - stop the compute async dma engines
289 * Stop the compute async dma queues (CIK).
302 * Halt or unhalt the async dma engines (CIK).
324 * cik_sdma_enable - stop the async dma engines
329 * Halt or unhalt the async dma engines (CIK).
358 * cik_sdma_gfx_resume - setup and start the async dma engines
441 * cik_sdma_rlc_resume - setup and start the async dma engines
521 * cik_sdma_resume - setup and start the async dma engines
551 * cik_sdma_fini - tear down the async dma engines
555 * Stop the async dma engines and free the rings (CIK).
636 * cik_sdma_ring_test - simple async dma engine test
767 * Check if the async DMA engine is locked up (CIK).
H A Dni_dma.c151 * cayman_dma_stop - stop the async dma engines
155 * Stop the async dma engines (cayman-SI).
180 * cayman_dma_resume - setup and start the async dma engines
265 * cayman_dma_fini - tear down the async dma engines
269 * Stop the async dma engines and free the rings (cayman-SI).
284 * Check if the async DMA engine is locked up.
H A DMakefile88 # add async DMA block
H A Devergreen_dma.c168 * Check if the async DMA engine is locked up.
/linux-4.1.27/drivers/staging/android/
H A Dsync.h302 * sync_fence_wait_async() - registers and async wait on the fence
315 * sync_fence_cancel_async() - cancels an async wait
319 * returns 0 if waiter was removed from fence's async waiter list.
320 * returns -ENOENT if waiter was not found on fence's async waiter list.
322 * Cancels a previously registered async wait. Will fail gracefully if
/linux-4.1.27/arch/mips/include/asm/octeon/
H A Dcvmx-fau.h316 * Builds I/O data for async operations
352 * Perform an async atomic 64 bit add. The old value is
372 * Perform an async atomic 32 bit add. The old value is
392 * Perform an async atomic 16 bit add. The old value is
411 * Perform an async atomic 8 bit add. The old value is
429 * Perform an async atomic 64 bit add after the current tag
452 * Perform an async atomic 32 bit add after the current tag
475 * Perform an async atomic 16 bit add after the current tag
498 * Perform an async atomic 8 bit add after the current tag
/linux-4.1.27/include/trace/events/
H A Dcompaction.h130 __entry->sync ? "sync" : "async")
163 __entry->sync ? "sync" : "async",
/linux-4.1.27/net/ipv4/
H A Dxfrm4_input.c40 int xfrm4_transport_finish(struct sk_buff *skb, int async) xfrm4_transport_finish() argument
47 if (!async) xfrm4_transport_finish()
/linux-4.1.27/drivers/staging/lustre/lustre/include/
H A Dlustre_ha.h55 int ptlrpc_recover_import(struct obd_import *imp, char *new_uuid, int async);
/linux-4.1.27/arch/tile/include/asm/
H A Dstack.h30 int profile; /* profiling, so stop on async intrpt */
H A Dthread_info.h119 #define TIF_ASYNC_TLB 3 /* got an async TLB fault in kernel */
/linux-4.1.27/drivers/gpu/drm/nouveau/include/nvkm/engine/
H A Dfifo.h74 struct nvkm_event uevent; /* async user trigger */
/linux-4.1.27/arch/m68k/include/asm/
H A Dcontregs.h24 #define AC_ASYNC_VA 0x6000000c /* c async fault virtual address */
H A Dsun3mmu.h38 #define AC_ASYNC_VA 0x6000000c /* c async fault virtual address */
/linux-4.1.27/kernel/power/
H A Dblock_io.c22 * @bio_chain: list of pending biod (for async reading)
/linux-4.1.27/mm/
H A Dmsync.c27 * async writeout immediately.
H A Dcompaction.c282 /* Update where async and sync compaction should restart */ update_pageblock_skip()
310 * very heavily contended. For async compaction, back out if the lock cannot
336 * need_resched() becoming true. If scheduling is needed, async compaction
342 * async compaction due to need_resched()
372 * need_resched() and either schedules in sync compaction or aborts async
377 * Returns true when async compaction should abort.
381 /* async compaction aborts if contended */ compact_should_abort()
421 * pending or async compaction detects need_resched() isolate_freepages_block()
449 * or parallel compactions. For async compaction do not isolate_freepages_block()
662 /* async migration should just abort */ isolate_migratepages_block()
679 * contention, to give chance to IRQs. Abort async compaction isolate_migratepages_block()
937 * to schedule, or even abort async compaction. isolate_freepages()
974 * isolate_freepages_block() might have aborted due to async isolate_freepages()
1081 * need to schedule, or even abort async compaction. isolate_migratepages()
1096 * For async compaction, also only scan in MOVABLE blocks. isolate_migratepages()
1473 * @mode: The migration mode for async, sync light, or sync migration
1528 * It is possible that async compaction aborted due to try_to_compact_pages()
1552 * async compaction, or due to a fatal signal detected. In that try_to_compact_pages()
/linux-4.1.27/sound/pci/asihpi/
H A Dhpi6205.h60 This is used for dynamic allocation of async event array
/linux-4.1.27/sound/soc/intel/atom/sst/
H A Dsst_ipc.c288 dev_err(sst_drv_ctx->dev, "FW sent async error msg:\n"); process_fw_async_msg()
309 "Unrecognized async msg from FW msg_id %#x\n", msg_id); process_fw_async_msg()
332 /* Check for async messages first */ sst_process_reply_mrfld()
334 /*FW sent async large message*/ sst_process_reply_mrfld()
/linux-4.1.27/drivers/gpu/drm/msm/
H A Dmsm_atomic.c167 * @async: asynchronous commit
177 struct drm_atomic_state *state, bool async) msm_atomic_commit()
251 if (async) { msm_atomic_commit()
176 msm_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state, bool async) msm_atomic_commit() argument
H A Dmsm_drv.h108 /* crtcs pending async atomic updates: */
163 struct drm_atomic_state *state, bool async);
/linux-4.1.27/arch/s390/kernel/
H A Dirq.c170 /* Check against async. stack address range. */ do_softirq_own_stack()
173 /* Need to switch to the async. stack. */ do_softirq_own_stack()
184 /* We are already on the async stack. */ do_softirq_own_stack()
H A Ddumpstack.c27 * touch all of them. The order is: panic stack, async stack, sync stack.
/linux-4.1.27/fs/btrfs/
H A Ddisk-io.c111 * async submit bios are used to offload expensive checksumming
782 struct async_submit_bio *async; run_one_async_start() local
785 async = container_of(work, struct async_submit_bio, work); run_one_async_start()
786 ret = async->submit_bio_start(async->inode, async->rw, async->bio, run_one_async_start()
787 async->mirror_num, async->bio_flags, run_one_async_start()
788 async->bio_offset); run_one_async_start()
790 async->error = ret; run_one_async_start()
796 struct async_submit_bio *async; run_one_async_done() local
799 async = container_of(work, struct async_submit_bio, work); run_one_async_done()
800 fs_info = BTRFS_I(async->inode)->root->fs_info; run_one_async_done()
810 if (async->error) { run_one_async_done()
811 bio_endio(async->bio, async->error); run_one_async_done()
815 async->submit_bio_done(async->inode, async->rw, async->bio, run_one_async_done()
816 async->mirror_num, async->bio_flags, run_one_async_done()
817 async->bio_offset); run_one_async_done()
822 struct async_submit_bio *async; run_one_async_free() local
824 async = container_of(work, struct async_submit_bio, work); run_one_async_free()
825 kfree(async); run_one_async_free()
835 struct async_submit_bio *async; btrfs_wq_submit_bio() local
837 async = kmalloc(sizeof(*async), GFP_NOFS); btrfs_wq_submit_bio()
838 if (!async) btrfs_wq_submit_bio()
841 async->inode = inode; btrfs_wq_submit_bio()
842 async->rw = rw; btrfs_wq_submit_bio()
843 async->bio = bio; btrfs_wq_submit_bio()
844 async->mirror_num = mirror_num; btrfs_wq_submit_bio()
845 async->submit_bio_start = submit_bio_start; btrfs_wq_submit_bio()
846 async->submit_bio_done = submit_bio_done; btrfs_wq_submit_bio()
848 btrfs_init_work(&async->work, btrfs_worker_helper, run_one_async_start, btrfs_wq_submit_bio()
851 async->bio_flags = bio_flags; btrfs_wq_submit_bio()
852 async->bio_offset = bio_offset; btrfs_wq_submit_bio()
854 async->error = 0; btrfs_wq_submit_bio()
859 btrfs_set_work_high_priority(&async->work); btrfs_wq_submit_bio()
861 btrfs_queue_work(fs_info->workers, &async->work); btrfs_wq_submit_bio()
894 * when we're called for a write, we're already in the async __btree_submit_bio_start()
907 * when we're called for a write, we're already in the async __btree_submit_bio_done()
931 int async = check_async_write(inode, bio_flags); btree_submit_bio_hook() local
937 * can happen in the async kernel threads btree_submit_bio_hook()
945 } else if (!async) { btree_submit_bio_hook()
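
btrfs_wq_submit_bio() above packages a write bio into an async_submit_bio with three work callbacks: run_one_async_start() does the expensive checksumming, run_one_async_done() submits or fails the bio, and run_one_async_free() releases the wrapper. A hedged user-space model of that start/done/free split (executed inline here; the kernel runs the phases on its btrfs workqueues):

    #include <stdio.h>
    #include <stdlib.h>

    /* Toy stand-in for struct async_submit_bio. */
    struct async_work {
        int payload;
        int error;
        void (*start)(struct async_work *); /* expensive prep (checksumming) */
        void (*done)(struct async_work *);  /* submit or fail the result */
        void (*free)(struct async_work *);  /* release the wrapper */
    };

    static void work_start(struct async_work *w)
    {
        w->error = (w->payload < 0);        /* pretend checksumming failed */
    }

    static void work_done(struct async_work *w)
    {
        if (w->error)
            printf("bio_endio(error)\n");
        else
            printf("submit payload %d\n", w->payload);
    }

    static void work_free(struct async_work *w)
    {
        free(w);
    }

    int main(void)
    {
        struct async_work *w = malloc(sizeof(*w));

        if (!w)
            return 1;
        *w = (struct async_work){ .payload = 7, .start = work_start,
                                  .done = work_done, .free = work_free };
        /* A workqueue would run these phases on worker threads, in order. */
        w->start(w);
        w->done(w);
        w->free(w);
        return 0;
    }
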
H A Ddelayed-inode.h63 * is waiting to be dealt with by the async worker.
/linux-4.1.27/fs/lockd/
H A Dclntproc.c325 * Generic NLM call, async version.
338 dprintk("lockd: call procedure %d on %s (async)\n", __nlm_async_call()
348 /* bootstrap and kick off the async RPC call */ __nlm_async_call()
502 * Solution B: Use the async version of the call (NLM_LOCK_{MSG,RES})
505 * re-implementing lockd for a third time in two months. The async
740 * We always use an async RPC call for this in order not to hang a
/linux-4.1.27/drivers/lguest/
H A Dhypercalls.c172 kill_guest(cpu, "Fetching async hypercalls"); do_async_hcalls()
181 kill_guest(cpu, "Writing result for async hypercall"); do_async_hcalls()
270 * Look in the hypercall ring for the async hypercalls: do_hypercalls()
/linux-4.1.27/arch/blackfin/kernel/
H A Dprocess.c261 * on the access to the async space. Make sure we validate accesses
262 * that cross async banks too.
302 /* not within async bounds */ in_async()
/linux-4.1.27/drivers/staging/lustre/lustre/obdecho/
H A Decho_client.c170 struct page **pages, int npages, int async);
1172 struct page **pages, int npages, int async) cl_echo_object_brw()
1243 async = async && (typ == CRT_WRITE); cl_echo_object_brw()
1244 if (async) cl_echo_object_brw()
1249 async ? "async" : "sync", rc); cl_echo_object_brw()
1547 u64 count, int async, echo_client_kbrw()
1616 rc = cl_echo_object_brw(eco, rw, offset, pages, npages, async); echo_client_kbrw()
1647 int async) echo_client_prep_commit()
1672 if (rw == OBD_BRW_WRITE && async) echo_client_prep_commit()
1708 if (async) echo_client_prep_commit()
1760 int async = 1; echo_client_brw_ioctl() local
1774 async = 0; echo_client_brw_ioctl()
1791 data->ioc_count, async, dummy_oti); echo_client_brw_ioctl()
1797 dummy_oti, async); echo_client_brw_ioctl()
1171 cl_echo_object_brw(struct echo_object *eco, int rw, u64 offset, struct page **pages, int npages, int async) cl_echo_object_brw() argument
1545 echo_client_kbrw(struct echo_device *ed, int rw, struct obdo *oa, struct echo_object *eco, u64 offset, u64 count, int async, struct obd_trans_info *oti) echo_client_kbrw() argument
1642 echo_client_prep_commit(const struct lu_env *env, struct obd_export *exp, int rw, struct obdo *oa, struct echo_object *eco, u64 offset, u64 count, u64 batch, struct obd_trans_info *oti, int async) echo_client_prep_commit() argument
/linux-4.1.27/drivers/net/wireless/b43/
H A Dmain.h109 struct b43_firmware_file *fw, bool async);
/linux-4.1.27/block/
H A Dblk-mq.h29 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
/linux-4.1.27/crypto/
H A Dablk_helper.c2 * Shared async block cipher helpers
H A Daead.c149 seq_printf(m, "async : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ? crypto_aead_show()
235 seq_printf(m, "async : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ? crypto_nivaead_show()
301 /* Ignore async algorithms if necessary. */ aead_geniv_alloc()
/linux-4.1.27/arch/sparc/kernel/
H A Dsun4m_irq.c121 #define SUN4M_INT_VME_ERR 0x08000000 /* vme async error */
323 printk(KERN_ERR "Module async error\n"); sun4m_nmi()
325 printk(KERN_ERR "MBus/SBus async error\n"); sun4m_nmi()
329 printk(KERN_ERR "VME async error\n"); sun4m_nmi()
/linux-4.1.27/crypto/async_tx/
H A Dasync_memcpy.c80 pr_debug("%s: (async) len: %zu\n", __func__, len); async_memcpy()
H A Dasync_tx.c36 printk(KERN_INFO "async_tx: api initialized (async)\n"); async_tx_init()
256 pr_debug("%s: (async)\n", __func__); async_trigger_callback()
H A Dasync_xor.c192 pr_debug("%s (async): len: %zu\n", __func__, len); async_xor()
288 pr_debug("%s: (async) len: %zu\n", __func__, len); async_xor_val()
/linux-4.1.27/drivers/tty/serial/
H A Dcrisv10.h118 * anything since we only have one type of async serial-port anyway in this
/linux-4.1.27/net/mac802154/
H A Dtx.c98 /* async is priority, otherwise sync is fallback */ ieee802154_tx()
/linux-4.1.27/drivers/rtc/
H A Drtc-opal.c120 pr_err("Failed to get the async token\n"); opal_get_tpo_time()
165 pr_err("Failed to get the async token\n"); opal_set_tpo_time()
/linux-4.1.27/drivers/staging/rtl8712/
H A Drtl871x_io.h78 #define _INTF_ASYNC_ BIT(0) /*support async io*/
92 u8 async);
H A Drtl871x_io.c36 * For r8712u, both sync/async operations are provided.
/linux-4.1.27/drivers/staging/octeon/
H A Dethernet-spi.c81 pr_err("SPI1: SRX Spi4 async FIFO overflow\n"); cvm_oct_spi_rml_interrupt()
145 pr_err("SPI0: SRX Spi4 async FIFO overflow\n"); cvm_oct_spi_rml_interrupt()
/linux-4.1.27/arch/xtensa/platforms/iss/
H A Dconsole.c49 * enables interrupts for a serial port, linking in its async structure into
77 * async structure from the interrupt chain if necessary, and we free
/linux-4.1.27/drivers/gpu/drm/sti/
H A Dsti_drm_drv.c80 struct drm_atomic_state *state, bool async) sti_drm_atomic_commit()
101 if (async) sti_drm_atomic_commit()
79 sti_drm_atomic_commit(struct drm_device *drm, struct drm_atomic_state *state, bool async) sti_drm_atomic_commit() argument
/linux-4.1.27/include/linux/sunrpc/
H A Dsched.h56 * action next procedure for async tasks
114 #define RPC_TASK_ASYNC 0x0001 /* is an async task */
/linux-4.1.27/virt/kvm/
H A Dkvm_main.c1216 static bool hva_to_pfn_fast(unsigned long addr, bool atomic, bool *async, hva_to_pfn_fast() argument
1222 if (!(async || atomic)) hva_to_pfn_fast()
1249 static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault, hva_to_pfn_slow() argument
1260 if (async) { hva_to_pfn_slow()
1304 * @async: whether this function need to wait IO complete if the
1314 static pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async, hva_to_pfn() argument
1322 BUG_ON(atomic && async); hva_to_pfn()
1324 if (hva_to_pfn_fast(addr, atomic, async, write_fault, writable, &pfn)) hva_to_pfn()
1330 npages = hva_to_pfn_slow(addr, async, write_fault, writable, &pfn); hva_to_pfn()
1336 (!async && check_user_page_hwpoison(addr))) { hva_to_pfn()
1350 if (async && vma_is_valid(vma, write_fault)) hva_to_pfn()
1351 *async = true; hva_to_pfn()
1361 bool *async, bool write_fault, bool *writable) __gfn_to_pfn_memslot()
1377 return hva_to_pfn(addr, atomic, async, write_fault, __gfn_to_pfn_memslot()
1381 static pfn_t __gfn_to_pfn(struct kvm *kvm, gfn_t gfn, bool atomic, bool *async, __gfn_to_pfn() argument
1386 if (async) __gfn_to_pfn()
1387 *async = false; __gfn_to_pfn()
1391 return __gfn_to_pfn_memslot(slot, gfn, atomic, async, write_fault, __gfn_to_pfn()
1401 pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async, gfn_to_pfn_async() argument
1404 return __gfn_to_pfn(kvm, gfn, false, async, write_fault, writable); gfn_to_pfn_async()
1360 __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn, bool atomic, bool *async, bool write_fault, bool *writable) __gfn_to_pfn_memslot() argument
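
hva_to_pfn() above tries a fast lookup first and falls back to the slow path only when that fails; if the caller passed a non-NULL async pointer and the page would need I/O, it sets *async rather than blocking, which is the basis of KVM's async page faults. A hedged sketch of that fast-path/slow-path/async-flag shape, with invented helper names standing in for hva_to_pfn_fast()/hva_to_pfn_slow():

    #include <stdbool.h>
    #include <stdio.h>

    static bool lookup_fast(unsigned long addr, unsigned long *pfn)
    {
        (void)addr; (void)pfn;
        return false;               /* pretend the page is not resident */
    }

    static bool lookup_slow(unsigned long addr, unsigned long *pfn)
    {
        *pfn = addr >> 12;          /* pretend I/O brought the page in */
        return true;
    }

    static long to_pfn(unsigned long addr, bool *async, unsigned long *pfn)
    {
        if (lookup_fast(addr, pfn))
            return 0;
        if (async) {
            *async = true;          /* caller retries later; no blocking */
            return -1;
        }
        return lookup_slow(addr, pfn) ? 0 : -1;  /* sync callers may block */
    }

    int main(void)
    {
        unsigned long pfn;
        bool async = false;

        if (to_pfn(0x7f0000001000UL, &async, &pfn) < 0 && async)
            printf("fault queued for async completion\n");
        return 0;
    }
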
/linux-4.1.27/drivers/net/ethernet/altera/
H A Daltera_sgdma.c343 /* If hardware is busy, don't restart async read.
344 * if status register is 0 - meaning initial state, restart async read,
346 * If read status indicate not busy and a status, restart the async
/linux-4.1.27/arch/x86/kernel/
H A Dkvm.c219 * async PF was not yet handled. kvm_async_pf_task_wake()
226 * handles async PF. kvm_async_pf_task_wake()
347 printk(KERN_INFO"KVM setup async PF for cpu %d\n", kvm_guest_cpu_init()
/linux-4.1.27/drivers/net/ethernet/intel/i40e/
H A Di40e_adminq.h70 bool async; member in struct:i40e_asq_cmd_details
/linux-4.1.27/drivers/net/ethernet/intel/i40evf/
H A Di40e_adminq.h70 bool async; member in struct:i40e_asq_cmd_details
/linux-4.1.27/drivers/net/ethernet/mellanox/mlx5/core/
H A Dhealth.c98 return "async EQ buffer overrun"; hsynd_str()
/linux-4.1.27/drivers/net/wireless/ath/ath6kl/
H A Dhif-ops.h40 "hif write async addr 0x%x buf 0x%p len %d request 0x%x\n", hif_write_async()
H A Dhif.c253 scat_req->addr, !read ? "async" : "sync", ath6kl_hif_submit_scat_req()
473 * Mailbox Interrupt, the HTC layer may issue async proc_pending_irqs()
475 * mailbox we use the async handler above called from the proc_pending_irqs()
/linux-4.1.27/drivers/net/wireless/libertas/
H A Dfirmware.c150 lbs_deb_fw("Starting async firmware load\n"); lbs_get_firmware_async()
/linux-4.1.27/drivers/scsi/libsas/
H A Dsas_ata.c26 #include <linux/async.h>
753 ASYNC_DOMAIN_EXCLUSIVE(async); sas_ata_strategy_handler()
782 async_schedule_domain(async_sas_ata_eh, dev, &async); sas_ata_strategy_handler()
788 async_synchronize_full_domain(&async); sas_ata_strategy_handler()
/linux-4.1.27/drivers/staging/rtl8723au/include/
H A Drtw_io.h97 #define _INTF_ASYNC_ BIT(0) /* support async io */
/linux-4.1.27/drivers/staging/lustre/lustre/osc/
H A Dosc_cl_internal.h141 * extent is a red black tree to manage (async) dirty pages.
208 * CLIO enqueues all DLM locks through ptlrpcd (that is, in "async" mode).
234 * - ldlm_lock_decref(): for async locks, matches ldlm_cli_enqueue().
318 * For async glimpse lock.
/linux-4.1.27/drivers/input/joystick/iforce/
H A Diforce.h84 /* Buffer for async write */
/linux-4.1.27/drivers/cpufreq/
H A Ddavinci-cpufreq.c150 asyncclk = clk_get(cpufreq.dev, "async"); davinci_cpufreq_probe()
/linux-4.1.27/drivers/gpu/drm/omapdrm/
H A Domap_dmm_priv.h149 bool async; member in struct:refill_engine
/linux-4.1.27/arch/mips/include/asm/mach-pmcs-msp71xx/
H A Dmsp_usb.h90 async buffer status */
/linux-4.1.27/drivers/w1/
H A Dw1_netlink.c58 struct w1_async_cmd async; member in struct:w1_cb_node
464 async); w1_process_cb()
698 node->async.cb = w1_process_cb; w1_cn_callback()
706 list_add_tail(&node->async.async_entry, &dev->async_list); w1_cn_callback()
/linux-4.1.27/Documentation/trace/postprocess/
H A Dtrace-vmscan-postprocess.pl645 print "Direct reclaim write file async I/O: $total_direct_writepage_file_async\n";
646 print "Direct reclaim write anon async I/O: $total_direct_writepage_anon_async\n";
659 print "Kswapd reclaim write file async I/O: $total_kswapd_writepage_file_async\n";
660 print "Kswapd reclaim write anon async I/O: $total_kswapd_writepage_anon_async\n";
/linux-4.1.27/kernel/irq/
H A Dautoprobe.c13 #include <linux/async.h>
/linux-4.1.27/drivers/gpu/drm/
H A Ddrm_atomic_helper.c927 * @async: asynchronous commit
938 bool async) drm_atomic_helper_commit()
942 if (async) drm_atomic_helper_commit()
992 * DOC: implementing async commit
994 * For now the atomic helpers don't support async commit directly. If there is
998 * For now drivers have to implement async commit themselves, with the following
1019 * while it's guaranteed that no relevant async worker runs means that async
1782 * Note that for now so called async page flips (i.e. updates which are not
1835 /* Driver takes ownership of state on successful async commit. */ drm_atomic_helper_page_flip()
1917 /* Driver takes ownership of state on successful async commit. */ drm_atomic_helper_connector_dpms()
936 drm_atomic_helper_commit(struct drm_device *dev, struct drm_atomic_state *state, bool async) drm_atomic_helper_commit() argument
/linux-4.1.27/drivers/staging/fwserial/
H A Dfwserial.h356 * Returns the max send async payload size in bytes based on the unit device
365 /* Max async payload is 4096 - see IEEE 1394-2008 tables 6-4, 16-18 */ link_speed_to_max_payload()
/linux-4.1.27/drivers/net/wireless/brcm80211/brcmfmac/
H A Dsdio.h308 * flags: backplane width, address increment, sync/async
312 * complete: callback function for command completion (async only)
/linux-4.1.27/fs/nfs/
H A Dread.c7 * modified for async RPC by okir@monad.swb.de
279 * previous async read operation failed.
/linux-4.1.27/drivers/net/irda/
H A Ddonauboe.c420 if (self->async) toshoboe_setbaud()
910 self->async = 1; toshoboe_probe()
942 self->async = 0; toshoboe_probe()
1331 if (self->async) toshoboe_net_open()
1543 self->async = 0; toshoboe_open()
1684 if ((!self->irdad) && (!self->async)) toshoboe_gotosleep()
1710 if ((!self->irdad) && (!self->async)) toshoboe_wakeup()
/linux-4.1.27/drivers/staging/lustre/lustre/llite/
H A Dstatahead.c77 /* pointer to async getattr enqueue info */
79 /* pointer to the async getattr request */
282 * Used by the async getattr request callback to find entry with index.
567 /* Someone is in glimpse (sync or async), do nothing. */ ll_agl_trigger()
596 CDEBUG(D_READA, "Handling (init) async glimpse: inode = " ll_agl_trigger()
604 CDEBUG(D_READA, "Handled (init) async glimpse: inode= " ll_agl_trigger()
754 /* Release the async ibits lock ASAP to avoid deadlock ll_statahead_interpret()
/linux-4.1.27/drivers/net/ppp/
H A Dppp_async.c2 * PPP async serial channel driver for Linux.
12 * and receiving PPP frames over async serial lines. It relies on
16 * Part of the code in this driver was inspired by the old async-only
517 * Procedure to encode the data for async serial transmission.
626 * Send a packet to the peer over an async tty line.
H A Dppp_synctty.c10 * The async map IOCTL codes are implemented to keep the user mode
12 * the async maps.
28 * Part of the code in this driver was inspired by the old async-only
/linux-4.1.27/drivers/gpu/drm/exynos/
H A Dexynos_drm_g2d.c217 int async; member in struct:g2d_runqueue_node
877 if (g2d->runqueue_node->async) g2d_runqueue_worker()
1276 runqueue_node->async = req->async; exynos_g2d_exec_ioctl()
1295 if (runqueue_node->async) exynos_g2d_exec_ioctl()
/linux-4.1.27/drivers/scsi/ibmvscsi/
H A Dibmvfc.c1187 login_info->async.va = cpu_to_be64(vhost->async_crq.msg_token); ibmvfc_set_login_info()
1188 login_info->async.len = cpu_to_be32(vhost->async_crq.size * sizeof(*vhost->async_crq.msgs)); ibmvfc_set_login_info()
2581 "Unknown async", 0, IBMVFC_DEFAULT_LOG_LEVEL
2585 * ibmvfc_get_ae_desc - Get text description for async event
2586 * @ae: async event
2627 * ibmvfc_handle_async - Handle an async event from the adapter
2710 dev_err(vhost->dev, "Unknown async event received: %lld\n", crq->event); ibmvfc_handle_async()
3103 * ibmvfc_next_async_crq - Returns the next entry in async queue
3180 struct ibmvfc_async_crq *async; ibmvfc_tasklet() local
3186 /* Pull all the valid messages off the async CRQ */ ibmvfc_tasklet()
3187 while ((async = ibmvfc_next_async_crq(vhost)) != NULL) { ibmvfc_tasklet()
3188 ibmvfc_handle_async(async, vhost); ibmvfc_tasklet()
3189 async->valid = 0; ibmvfc_tasklet()
3201 if ((async = ibmvfc_next_async_crq(vhost)) != NULL) { ibmvfc_tasklet()
3203 ibmvfc_handle_async(async, vhost); ibmvfc_tasklet()
3204 async->valid = 0; ibmvfc_tasklet()
4260 * ibmvfc_log_ae - Log async events if necessary
4613 dev_err(dev, "Couldn't allocate async queue.\n"); ibmvfc_alloc_mem()
4623 dev_err(dev, "Failed to map async queue\n"); ibmvfc_alloc_mem()
/linux-4.1.27/drivers/mfd/
H A Darizona-core.c184 dev_err(arizona->dev, "Slimbus async overclocked\n"); arizona_overclocked()
188 dev_err(arizona->dev, "ASRC async system overclocked\n"); arizona_overclocked()
190 dev_err(arizona->dev, "ASRC async WARP overclocked\n"); arizona_overclocked()
