/linux-4.1.27/drivers/scsi/fnic/
D | vnic_wq.c | 27 static int vnic_wq_alloc_bufs(struct vnic_wq *wq) in vnic_wq_alloc_bufs() argument 31 unsigned int i, j, count = wq->ring.desc_count; in vnic_wq_alloc_bufs() 34 vdev = wq->vdev; in vnic_wq_alloc_bufs() 37 wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ, GFP_ATOMIC); in vnic_wq_alloc_bufs() 38 if (!wq->bufs[i]) { in vnic_wq_alloc_bufs() 45 buf = wq->bufs[i]; in vnic_wq_alloc_bufs() 48 buf->desc = (u8 *)wq->ring.descs + in vnic_wq_alloc_bufs() 49 wq->ring.desc_size * buf->index; in vnic_wq_alloc_bufs() 51 buf->next = wq->bufs[0]; in vnic_wq_alloc_bufs() 54 buf->next = wq->bufs[i + 1]; in vnic_wq_alloc_bufs() [all …]
|
D | vnic_wq_copy.h | 36 static inline unsigned int vnic_wq_copy_desc_avail(struct vnic_wq_copy *wq) in vnic_wq_copy_desc_avail() argument 38 return wq->ring.desc_avail; in vnic_wq_copy_desc_avail() 41 static inline unsigned int vnic_wq_copy_desc_in_use(struct vnic_wq_copy *wq) in vnic_wq_copy_desc_in_use() argument 43 return wq->ring.desc_count - 1 - wq->ring.desc_avail; in vnic_wq_copy_desc_in_use() 46 static inline void *vnic_wq_copy_next_desc(struct vnic_wq_copy *wq) in vnic_wq_copy_next_desc() argument 48 struct fcpio_host_req *desc = wq->ring.descs; in vnic_wq_copy_next_desc() 49 return &desc[wq->to_use_index]; in vnic_wq_copy_next_desc() 52 static inline void vnic_wq_copy_post(struct vnic_wq_copy *wq) in vnic_wq_copy_post() argument 55 ((wq->to_use_index + 1) == wq->ring.desc_count) ? in vnic_wq_copy_post() 56 (wq->to_use_index = 0) : (wq->to_use_index++); in vnic_wq_copy_post() [all …]
|
D | vnic_wq_copy.c | 25 void vnic_wq_copy_enable(struct vnic_wq_copy *wq) in vnic_wq_copy_enable() argument 27 iowrite32(1, &wq->ctrl->enable); in vnic_wq_copy_enable() 30 int vnic_wq_copy_disable(struct vnic_wq_copy *wq) in vnic_wq_copy_disable() argument 34 iowrite32(0, &wq->ctrl->enable); in vnic_wq_copy_disable() 38 if (!(ioread32(&wq->ctrl->running))) in vnic_wq_copy_disable() 45 wq->index, ioread32(&wq->ctrl->fetch_index), in vnic_wq_copy_disable() 46 ioread32(&wq->ctrl->posted_index)); in vnic_wq_copy_disable() 51 void vnic_wq_copy_clean(struct vnic_wq_copy *wq, in vnic_wq_copy_clean() argument 52 void (*q_clean)(struct vnic_wq_copy *wq, in vnic_wq_copy_clean() argument 55 BUG_ON(ioread32(&wq->ctrl->enable)); in vnic_wq_copy_clean() [all …]
|
D | vnic_wq.h | 96 static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq) in vnic_wq_desc_avail() argument 99 return wq->ring.desc_avail; in vnic_wq_desc_avail() 102 static inline unsigned int vnic_wq_desc_used(struct vnic_wq *wq) in vnic_wq_desc_used() argument 105 return wq->ring.desc_count - wq->ring.desc_avail - 1; in vnic_wq_desc_used() 108 static inline void *vnic_wq_next_desc(struct vnic_wq *wq) in vnic_wq_next_desc() argument 110 return wq->to_use->desc; in vnic_wq_next_desc() 113 static inline void vnic_wq_post(struct vnic_wq *wq, in vnic_wq_post() argument 117 struct vnic_wq_buf *buf = wq->to_use; in vnic_wq_post() 132 iowrite32(buf->index, &wq->ctrl->posted_index); in vnic_wq_post() 134 wq->to_use = buf; in vnic_wq_post() [all …]
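
vnic_wq_desc_used() above encodes the classic "keep one slot free" ring convention: a ring of desc_count descriptors holds at most desc_count - 1 posted entries, so a full ring can be distinguished from an empty one. The same arithmetic recurs in the enic copy of vnic_wq.h and in cxgb4's t4_rq_avail() further down. A minimal user-space sketch of the invariant (names are illustrative, not from the driver):

#include <assert.h>

/* One-slot-reserved ring accounting: used + avail == desc_count - 1. */
struct ring {
	unsigned int desc_count;	/* total descriptors in the ring */
	unsigned int desc_avail;	/* descriptors free for posting */
};

static unsigned int ring_used(const struct ring *r)
{
	return r->desc_count - r->desc_avail - 1;
}

int main(void)
{
	struct ring r = { .desc_count = 16, .desc_avail = 15 };

	assert(ring_used(&r) == 0);	/* empty: all but one slot free */
	r.desc_avail -= 5;		/* post five descriptors */
	assert(ring_used(&r) == 5);
	return 0;
}
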
|
D | fnic_res.h | 30 static inline void fnic_queue_wq_desc(struct vnic_wq *wq, in fnic_queue_wq_desc() argument 37 struct wq_enet_desc *desc = vnic_wq_next_desc(wq); in fnic_queue_wq_desc() 51 vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop); in fnic_queue_wq_desc() 54 static inline void fnic_queue_wq_eth_desc(struct vnic_wq *wq, in fnic_queue_wq_eth_desc() argument 61 struct wq_enet_desc *desc = vnic_wq_next_desc(wq); in fnic_queue_wq_eth_desc() 76 vnic_wq_post(wq, os_buf, dma_addr, len, 1, 1); in fnic_queue_wq_eth_desc() 79 static inline void fnic_queue_wq_copy_desc_icmnd_16(struct vnic_wq_copy *wq, in fnic_queue_wq_copy_desc_icmnd_16() argument 91 struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq); in fnic_queue_wq_copy_desc_icmnd_16() 121 vnic_wq_copy_post(wq); in fnic_queue_wq_copy_desc_icmnd_16() 124 static inline void fnic_queue_wq_copy_desc_itmf(struct vnic_wq_copy *wq, in fnic_queue_wq_copy_desc_itmf() argument [all …]
|
D | fnic_scsi.c | 143 static int free_wq_copy_descs(struct fnic *fnic, struct vnic_wq_copy *wq) in free_wq_copy_descs() argument 153 if (wq->to_clean_index <= fnic->fw_ack_index[0]) in free_wq_copy_descs() 154 wq->ring.desc_avail += (fnic->fw_ack_index[0] in free_wq_copy_descs() 155 - wq->to_clean_index + 1); in free_wq_copy_descs() 157 wq->ring.desc_avail += (wq->ring.desc_count in free_wq_copy_descs() 158 - wq->to_clean_index in free_wq_copy_descs() 166 wq->to_clean_index = in free_wq_copy_descs() 167 (fnic->fw_ack_index[0] + 1) % wq->ring.desc_count; in free_wq_copy_descs() 208 struct vnic_wq_copy *wq = &fnic->wq_copy[0]; in fnic_fw_reset_handler() local 224 if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0]) in fnic_fw_reset_handler() [all …]
|
D | fnic_fcs.c | 979 struct vnic_wq *wq = &fnic->wq[0]; in fnic_eth_send() local 1007 if (!vnic_wq_desc_avail(wq)) { in fnic_eth_send() 1014 fnic_queue_wq_eth_desc(wq, skb, pa, skb->len, in fnic_eth_send() 1025 struct vnic_wq *wq = &fnic->wq[0]; in fnic_send_frame() local 1081 if (!vnic_wq_desc_avail(wq)) { in fnic_send_frame() 1088 fnic_queue_wq_desc(wq, skb, pa, tot_len, fr_eof(fp), in fnic_send_frame() 1188 static void fnic_wq_complete_frame_send(struct vnic_wq *wq, in fnic_wq_complete_frame_send() argument 1194 struct fnic *fnic = vnic_dev_priv(wq->vdev); in fnic_wq_complete_frame_send() 1211 vnic_wq_service(&fnic->wq[q_number], cq_desc, completed_index, in fnic_wq_cmpl_handler_cont() 1234 void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf) in fnic_free_wq_buf() argument [all …]
|
D | fnic.h | 305 ____cacheline_aligned struct vnic_wq wq[FNIC_WQ_MAX]; member 330 void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf); 355 void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
|
D | fnic_res.c | 215 vnic_wq_free(&fnic->wq[i]); in fnic_free_vnic_resources() 256 err = vnic_wq_alloc(fnic->vdev, &fnic->wq[i], i, in fnic_alloc_vnic_resources() 365 vnic_wq_init(&fnic->wq[i], in fnic_alloc_vnic_resources()
|
/linux-4.1.27/drivers/net/ethernet/cisco/enic/ |
D | vnic_wq.c | 30 static int vnic_wq_alloc_bufs(struct vnic_wq *wq) in vnic_wq_alloc_bufs() argument 33 unsigned int i, j, count = wq->ring.desc_count; in vnic_wq_alloc_bufs() 37 wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ(count), GFP_ATOMIC); in vnic_wq_alloc_bufs() 38 if (!wq->bufs[i]) in vnic_wq_alloc_bufs() 43 buf = wq->bufs[i]; in vnic_wq_alloc_bufs() 46 buf->desc = (u8 *)wq->ring.descs + in vnic_wq_alloc_bufs() 47 wq->ring.desc_size * buf->index; in vnic_wq_alloc_bufs() 49 buf->next = wq->bufs[0]; in vnic_wq_alloc_bufs() 53 buf->next = wq->bufs[i + 1]; in vnic_wq_alloc_bufs() 63 wq->to_use = wq->to_clean = wq->bufs[0]; in vnic_wq_alloc_bufs() [all …]
|
D | vnic_wq.h | 91 static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq) in vnic_wq_desc_avail() argument 94 return wq->ring.desc_avail; in vnic_wq_desc_avail() 97 static inline unsigned int vnic_wq_desc_used(struct vnic_wq *wq) in vnic_wq_desc_used() argument 100 return wq->ring.desc_count - wq->ring.desc_avail - 1; in vnic_wq_desc_used() 103 static inline void *vnic_wq_next_desc(struct vnic_wq *wq) in vnic_wq_next_desc() argument 105 return wq->to_use->desc; in vnic_wq_next_desc() 108 static inline void vnic_wq_doorbell(struct vnic_wq *wq) in vnic_wq_doorbell() argument 116 iowrite32(wq->to_use->index, &wq->ctrl->posted_index); in vnic_wq_doorbell() 119 static inline void vnic_wq_post(struct vnic_wq *wq, in vnic_wq_post() argument 125 struct vnic_wq_buf *buf = wq->to_use; in vnic_wq_post() [all …]
|
D | enic_res.h | 43 static inline void enic_queue_wq_desc_ex(struct vnic_wq *wq, in enic_queue_wq_desc_ex() argument 49 struct wq_enet_desc *desc = vnic_wq_next_desc(wq); in enic_queue_wq_desc_ex() 65 vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop, desc_skip_cnt, in enic_queue_wq_desc_ex() 69 static inline void enic_queue_wq_desc_cont(struct vnic_wq *wq, in enic_queue_wq_desc_cont() argument 73 enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len, in enic_queue_wq_desc_cont() 78 static inline void enic_queue_wq_desc(struct vnic_wq *wq, void *os_buf, in enic_queue_wq_desc() argument 82 enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len, in enic_queue_wq_desc() 88 static inline void enic_queue_wq_desc_csum(struct vnic_wq *wq, in enic_queue_wq_desc_csum() argument 93 enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len, in enic_queue_wq_desc_csum() 100 static inline void enic_queue_wq_desc_csum_l4(struct vnic_wq *wq, in enic_queue_wq_desc_csum_l4() argument [all …]
|
D | enic.h | 167 ____cacheline_aligned struct vnic_wq wq[ENIC_WQ_MAX]; member 204 static inline unsigned int enic_cq_wq(struct enic *enic, unsigned int wq) in enic_cq_wq() argument 206 return enic->rq_count + wq; in enic_cq_wq() 231 unsigned int wq) in enic_msix_wq_intr() argument 233 return enic->cq[enic_cq_wq(enic, wq)].interrupt_offset; in enic_msix_wq_intr()
|
D | enic_main.c | 139 static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf) in enic_free_wq_buf() argument 141 struct enic *enic = vnic_dev_priv(wq->vdev); in enic_free_wq_buf() 154 static void enic_wq_free_buf(struct vnic_wq *wq, in enic_wq_free_buf() argument 157 enic_free_wq_buf(wq, buf); in enic_wq_free_buf() 167 vnic_wq_service(&enic->wq[q_number], cq_desc, in enic_wq_service() 172 vnic_wq_desc_avail(&enic->wq[q_number]) >= in enic_wq_service() 187 error_status = vnic_wq_error_status(&enic->wq[i]); in enic_log_q_error() 355 static int enic_queue_wq_skb_cont(struct enic *enic, struct vnic_wq *wq, in enic_queue_wq_skb_cont() argument 370 enic_queue_wq_desc_cont(wq, skb, dma_addr, skb_frag_size(frag), in enic_queue_wq_skb_cont() 378 static int enic_queue_wq_skb_vlan(struct enic *enic, struct vnic_wq *wq, in enic_queue_wq_skb_vlan() argument [all …]
|
D | enic_res.c | 187 vnic_wq_free(&enic->wq[i]); in enic_free_vnic_resources() 252 vnic_wq_init(&enic->wq[i], in enic_init_vnic_resources() 333 err = vnic_wq_alloc(enic->vdev, &enic->wq[i], i, in enic_alloc_vnic_resources()
|
/linux-4.1.27/fs/autofs4/ |
D | waitq.c | 29 struct autofs_wait_queue *wq, *nwq; in autofs4_catatonic_mode() local 40 wq = sbi->queues; in autofs4_catatonic_mode() 42 while (wq) { in autofs4_catatonic_mode() 43 nwq = wq->next; in autofs4_catatonic_mode() 44 wq->status = -ENOENT; /* Magic is gone - report failure */ in autofs4_catatonic_mode() 45 kfree(wq->name.name); in autofs4_catatonic_mode() 46 wq->name.name = NULL; in autofs4_catatonic_mode() 47 wq->wait_ctr--; in autofs4_catatonic_mode() 48 wake_up_interruptible(&wq->queue); in autofs4_catatonic_mode() 49 wq = nwq; in autofs4_catatonic_mode() [all …]
|
/linux-4.1.27/fs/btrfs/ |
D | async-thread.c | 132 __btrfs_destroy_workqueue(struct __btrfs_workqueue *wq); 168 static inline void thresh_queue_hook(struct __btrfs_workqueue *wq) in thresh_queue_hook() argument 170 if (wq->thresh == NO_THRESHOLD) in thresh_queue_hook() 172 atomic_inc(&wq->pending); in thresh_queue_hook() 180 static inline void thresh_exec_hook(struct __btrfs_workqueue *wq) in thresh_exec_hook() argument 186 if (wq->thresh == NO_THRESHOLD) in thresh_exec_hook() 189 atomic_dec(&wq->pending); in thresh_exec_hook() 190 spin_lock(&wq->thres_lock); in thresh_exec_hook() 195 wq->count++; in thresh_exec_hook() 196 wq->count %= (wq->thresh / 4); in thresh_exec_hook() [all …]
|
D | async-thread.h | 39 struct __btrfs_workqueue *wq; member 76 void btrfs_queue_work(struct btrfs_workqueue *wq, 78 void btrfs_destroy_workqueue(struct btrfs_workqueue *wq); 79 void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int max);
|
/linux-4.1.27/include/linux/ |
D | wait.h | 212 #define ___wait_event(wq, condition, state, exclusive, ret, cmd) \ argument 225 long __int = prepare_to_wait_event(&wq, &__wait, state);\ 233 abort_exclusive_wait(&wq, &__wait, \ 242 finish_wait(&wq, &__wait); \ 246 #define __wait_event(wq, condition) \ argument 247 (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \ 262 #define wait_event(wq, condition) \ argument 267 __wait_event(wq, condition); \ 270 #define __io_wait_event(wq, condition) \ argument 271 (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \ [all …]
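
___wait_event() above is the template every wait_event*() variant expands to: prepare_to_wait_event(), re-check the condition, schedule, repeat, then finish_wait(). Callers almost never open-code that loop; the usual pairing is wait_event() on one side and wake_up() after the condition becomes true on the other. A minimal kernel-style sketch of that pairing (the wait queue head and flag are invented for illustration):

#include <linux/wait.h>
#include <linux/sched.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
static int demo_done;			/* condition wait_event() re-checks */

static void demo_consumer(void)
{
	/* Sleeps uninterruptibly until demo_done is non-zero; the
	 * condition is re-evaluated after every wakeup. */
	wait_event(demo_wq, demo_done);
}

static void demo_producer(void)
{
	demo_done = 1;		/* make the condition true first ... */
	wake_up(&demo_wq);	/* ... then wake any sleepers */
}
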
|
D | workqueue.h | 118 struct workqueue_struct *wq; member 421 extern void destroy_workqueue(struct workqueue_struct *wq); 425 int apply_workqueue_attrs(struct workqueue_struct *wq, 428 extern bool queue_work_on(int cpu, struct workqueue_struct *wq, 430 extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq, 432 extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq, 435 extern void flush_workqueue(struct workqueue_struct *wq); 436 extern void drain_workqueue(struct workqueue_struct *wq); 450 extern void workqueue_set_max_active(struct workqueue_struct *wq, 453 extern bool workqueue_congested(int cpu, struct workqueue_struct *wq); [all …]
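
The declarations above are the core of the workqueue API: allocate a queue, queue work items onto it, then flush and destroy it on teardown. A hedged sketch of that lifecycle, using only the calls visible here plus their common wrappers (all names are invented):

#include <linux/workqueue.h>
#include <linux/errno.h>

static void demo_fn(struct work_struct *work)
{
	/* runs in process context on a kworker thread */
}

static DECLARE_WORK(demo_work, demo_fn);
static struct workqueue_struct *demo_wq;

static int demo_init(void)
{
	demo_wq = alloc_workqueue("demo", WQ_UNBOUND, 0);
	if (!demo_wq)
		return -ENOMEM;
	queue_work(demo_wq, &demo_work);	/* schedule one execution */
	return 0;
}

static void demo_exit(void)
{
	flush_workqueue(demo_wq);	/* wait for queued work to finish */
	destroy_workqueue(demo_wq);
}
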
|
D | freezer.h | 250 #define wait_event_freezekillable_unsafe(wq, condition) \ argument 254 __retval = wait_event_killable(wq, (condition)); \ 296 #define wait_event_freezekillable_unsafe(wq, condition) \ argument 297 wait_event_killable(wq, condition)
|
D | padata.h | 155 struct workqueue_struct *wq; member 168 struct workqueue_struct *wq); 169 extern struct padata_instance *padata_alloc(struct workqueue_struct *wq,
|
D | isdn_ppp.h | 165 wait_queue_head_t wq; member
|
D | tty.h | 689 #define wait_event_interruptible_tty(tty, wq, condition) \ argument 693 __ret = __wait_event_interruptible_tty(tty, wq, \ 698 #define __wait_event_interruptible_tty(tty, wq, condition) \ argument 699 ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0, \
|
/linux-4.1.27/kernel/ |
D | workqueue.c | 200 struct workqueue_struct *wq; /* I: the owning workqueue */ member 340 static void workqueue_sysfs_unregister(struct workqueue_struct *wq); 350 #define assert_rcu_or_wq_mutex(wq) \ argument 352 lockdep_is_held(&wq->mutex), \ 355 #define assert_rcu_or_wq_mutex_or_pool_mutex(wq) \ argument 357 lockdep_is_held(&wq->mutex) || \ 410 #define for_each_pwq(pwq, wq) \ argument 411 list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node) \ 412 if (({ assert_rcu_or_wq_mutex(wq); false; })) { } \ 572 static struct pool_workqueue *unbound_pwq_by_node(struct workqueue_struct *wq, in unbound_pwq_by_node() argument [all …]
|
D | cpu.c | 63 wait_queue_head_t wq; member 77 .wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq), 128 if (refcount <= 0 && waitqueue_active(&cpu_hotplug.wq)) in put_online_cpus() 129 wake_up(&cpu_hotplug.wq); in put_online_cpus() 167 prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE); in cpu_hotplug_begin() 173 finish_wait(&cpu_hotplug.wq, &wait); in cpu_hotplug_begin()
|
D | padata.c | 143 queue_work_on(target_cpu, pinst->wq, &queue->work); in padata_do_parallel() 266 queue_work_on(cb_cpu, pinst->wq, &squeue->work); in padata_reorder() 1022 struct padata_instance *padata_alloc_possible(struct workqueue_struct *wq) in padata_alloc_possible() argument 1024 return padata_alloc(wq, cpu_possible_mask, cpu_possible_mask); in padata_alloc_possible() 1036 struct padata_instance *padata_alloc(struct workqueue_struct *wq, in padata_alloc() argument 1064 pinst->wq = wq; in padata_alloc()
|
/linux-4.1.27/drivers/infiniband/hw/cxgb4/ |
D | t4.h | 340 static inline int t4_rqes_posted(struct t4_wq *wq) in t4_rqes_posted() argument 342 return wq->rq.in_use; in t4_rqes_posted() 345 static inline int t4_rq_empty(struct t4_wq *wq) in t4_rq_empty() argument 347 return wq->rq.in_use == 0; in t4_rq_empty() 350 static inline int t4_rq_full(struct t4_wq *wq) in t4_rq_full() argument 352 return wq->rq.in_use == (wq->rq.size - 1); in t4_rq_full() 355 static inline u32 t4_rq_avail(struct t4_wq *wq) in t4_rq_avail() argument 357 return wq->rq.size - 1 - wq->rq.in_use; in t4_rq_avail() 360 static inline void t4_rq_produce(struct t4_wq *wq, u8 len16) in t4_rq_produce() argument 362 wq->rq.in_use++; in t4_rq_produce() [all …]
|
D | cq.c | 185 static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq) in insert_recv_cqe() argument 190 wq, cq, cq->sw_cidx, cq->sw_pidx); in insert_recv_cqe() 196 CQE_QPID_V(wq->sq.qid)); in insert_recv_cqe() 202 int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count) in c4iw_flush_rq() argument 205 int in_use = wq->rq.in_use - count; in c4iw_flush_rq() 209 wq, cq, wq->rq.in_use, count); in c4iw_flush_rq() 211 insert_recv_cqe(wq, cq); in c4iw_flush_rq() 217 static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq, in insert_sq_cqe() argument 223 wq, cq, cq->sw_cidx, cq->sw_pidx); in insert_sq_cqe() 229 CQE_QPID_V(wq->sq.qid)); in insert_sq_cqe() [all …]
|
D | qp.c | 149 static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, in destroy_qp() argument 157 wq->rq.memsize, wq->rq.queue, in destroy_qp() 158 dma_unmap_addr(&wq->rq, mapping)); in destroy_qp() 159 dealloc_sq(rdev, &wq->sq); in destroy_qp() 160 c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size); in destroy_qp() 161 kfree(wq->rq.sw_rq); in destroy_qp() 162 kfree(wq->sq.sw_sq); in destroy_qp() 163 c4iw_put_qpid(rdev, wq->rq.qid, uctx); in destroy_qp() 164 c4iw_put_qpid(rdev, wq->sq.qid, uctx); in destroy_qp() 168 static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, in create_qp() argument [all …]
|
D | device.c | 117 void c4iw_log_wr_stats(struct t4_wq *wq, struct t4_cqe *cqe) in c4iw_log_wr_stats() argument 122 if (!wq->rdev->wr_log) in c4iw_log_wr_stats() 125 idx = (atomic_inc_return(&wq->rdev->wr_log_idx) - 1) & in c4iw_log_wr_stats() 126 (wq->rdev->wr_log_size - 1); in c4iw_log_wr_stats() 127 le.poll_sge_ts = cxgb4_read_sge_timestamp(wq->rdev->lldi.ports[0]); in c4iw_log_wr_stats() 132 le.qid = wq->sq.qid; in c4iw_log_wr_stats() 134 le.post_host_ts = wq->sq.sw_sq[wq->sq.cidx].host_ts; in c4iw_log_wr_stats() 135 le.post_sge_ts = wq->sq.sw_sq[wq->sq.cidx].sge_ts; in c4iw_log_wr_stats() 138 le.qid = wq->rq.qid; in c4iw_log_wr_stats() 140 le.post_host_ts = wq->rq.sw_rq[wq->rq.cidx].host_ts; in c4iw_log_wr_stats() [all …]
|
D | iw_cxgb4.h | 484 struct t4_wq wq; member 1018 void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count); 1020 int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count); 1035 extern void c4iw_log_wr_stats(struct t4_wq *wq, struct t4_cqe *cqe);
|
/linux-4.1.27/drivers/infiniband/hw/cxgb3/ |
D | cxio_hal.c | 275 struct t3_wq *wq, struct cxio_ucontext *uctx) in cxio_create_qp() argument 277 int depth = 1UL << wq->size_log2; in cxio_create_qp() 278 int rqsize = 1UL << wq->rq_size_log2; in cxio_create_qp() 280 wq->qpid = get_qpid(rdev_p, uctx); in cxio_create_qp() 281 if (!wq->qpid) in cxio_create_qp() 284 wq->rq = kzalloc(depth * sizeof(struct t3_swrq), GFP_KERNEL); in cxio_create_qp() 285 if (!wq->rq) in cxio_create_qp() 288 wq->rq_addr = cxio_hal_rqtpool_alloc(rdev_p, rqsize); in cxio_create_qp() 289 if (!wq->rq_addr) in cxio_create_qp() 292 wq->sq = kzalloc(depth * sizeof(struct t3_swsq), GFP_KERNEL); in cxio_create_qp() [all …]
|
D | iwch_qp.c | 150 u8 *flit_cnt, int *wr_cnt, struct t3_wq *wq) in build_fastreg() argument 174 wqe = (union t3_wr *)(wq->queue + in build_fastreg() 175 Q_PTR2IDX((wq->wptr+1), wq->size_log2)); in build_fastreg() 177 Q_GENBIT(wq->wptr + 1, wq->size_log2), in build_fastreg() 280 qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr, in build_rdma_recv() 281 qhp->wq.rq_size_log2)].wr_id = wr->wr_id; in build_rdma_recv() 282 qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr, in build_rdma_recv() 283 qhp->wq.rq_size_log2)].pbl_addr = 0; in build_rdma_recv() 343 qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr, in build_zero_stag_recv() 344 qhp->wq.rq_size_log2)].wr_id = wr->wr_id; in build_zero_stag_recv() [all …]
|
D | iwch_cq.c | 49 struct t3_wq *wq; in iwch_poll_cq_one() local 62 wq = NULL; in iwch_poll_cq_one() 65 wq = &(qhp->wq); in iwch_poll_cq_one() 67 ret = cxio_poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie, in iwch_poll_cq_one() 190 if (wq) in iwch_poll_cq_one()
|
D | cxio_hal.h | 165 int cxio_create_qp(struct cxio_rdev *rdev, u32 kernel_domain, struct t3_wq *wq, 167 int cxio_destroy_qp(struct cxio_rdev *rdev, struct t3_wq *wq, 190 int cxio_flush_rq(struct t3_wq *wq, struct t3_cq *cq, int count); 191 int cxio_flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count); 192 void cxio_count_rcqes(struct t3_cq *cq, struct t3_wq *wq, int *count); 193 void cxio_count_scqes(struct t3_cq *cq, struct t3_wq *wq, int *count); 195 int cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe,
|
D | iwch_ev.c | 66 qhp->attr.state, qhp->wq.qpid, CQE_STATUS(rsp_msg->cqe)); in post_qp_event() 141 __func__, qhp->wq.qpid, qhp->ep); in iwch_ev_dispatch() 145 qhp->wq.qpid); in iwch_ev_dispatch() 222 CQE_STATUS(rsp_msg->cqe), qhp->wq.qpid); in iwch_ev_dispatch()
|
D | iwch_provider.c | 872 remove_handle(rhp, &rhp->qpidr, qhp->wq.qpid); in iwch_destroy_qp() 879 cxio_destroy_qp(&rhp->rdev, &qhp->wq, in iwch_destroy_qp() 883 ib_qp, qhp->wq.qpid, qhp); in iwch_destroy_qp() 947 qhp->wq.size_log2 = ilog2(wqsize); in iwch_create_qp() 948 qhp->wq.rq_size_log2 = ilog2(rqsize); in iwch_create_qp() 949 qhp->wq.sq_size_log2 = ilog2(sqsize); in iwch_create_qp() 950 if (cxio_create_qp(&rhp->rdev, !udata, &qhp->wq, in iwch_create_qp() 987 if (insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.qpid)) { in iwch_create_qp() 988 cxio_destroy_qp(&rhp->rdev, &qhp->wq, in iwch_create_qp() 1011 uresp.qpid = qhp->wq.qpid; in iwch_create_qp() [all …]
|
D | cxio_wr.h | 747 static inline void cxio_set_wq_in_error(struct t3_wq *wq) in cxio_set_wq_in_error() argument 749 wq->queue->wq_in_err.err |= 1; in cxio_set_wq_in_error() 752 static inline void cxio_disable_wq_db(struct t3_wq *wq) in cxio_disable_wq_db() argument 754 wq->queue->wq_in_err.err |= 2; in cxio_disable_wq_db() 757 static inline void cxio_enable_wq_db(struct t3_wq *wq) in cxio_enable_wq_db() argument 759 wq->queue->wq_in_err.err &= ~2; in cxio_enable_wq_db() 762 static inline int cxio_wq_db_enabled(struct t3_wq *wq) in cxio_wq_db_enabled() argument 764 return !(wq->queue->wq_in_err.err & 2); in cxio_wq_db_enabled()
|
D | iwch.c | 70 cxio_disable_wq_db(&qhp->wq); in disable_qp_db() 79 ring_doorbell(qhp->rhp->rdev.ctrl_qp.doorbell, qhp->wq.qpid); in enable_qp_db() 80 cxio_enable_wq_db(&qhp->wq); in enable_qp_db()
|
/linux-4.1.27/drivers/infiniband/hw/ipath/ |
D | ipath_srq.c | 52 struct ipath_rwq *wq; in ipath_post_srq_receive() local 68 wq = srq->rq.wq; in ipath_post_srq_receive() 69 next = wq->head + 1; in ipath_post_srq_receive() 72 if (next == wq->tail) { in ipath_post_srq_receive() 79 wqe = get_rwqe_ptr(&srq->rq, wq->head); in ipath_post_srq_receive() 86 wq->head = next; in ipath_post_srq_receive() 139 srq->rq.wq = vmalloc_user(sizeof(struct ipath_rwq) + srq->rq.size * sz); in ipath_create_srq() 140 if (!srq->rq.wq) { in ipath_create_srq() 156 srq->rq.wq); in ipath_create_srq() 175 srq->rq.wq->head = 0; in ipath_create_srq() [all …]
|
D | ipath_qp.c | 360 if (qp->r_rq.wq) { in ipath_reset_qp() 361 qp->r_rq.wq->head = 0; in ipath_reset_qp() 362 qp->r_rq.wq->tail = 0; in ipath_reset_qp() 410 if (qp->r_rq.wq) { in ipath_error_qp() 411 struct ipath_rwq *wq; in ipath_error_qp() local 418 wq = qp->r_rq.wq; in ipath_error_qp() 419 head = wq->head; in ipath_error_qp() 422 tail = wq->tail; in ipath_error_qp() 431 wq->tail = tail; in ipath_error_qp() 690 struct ipath_rwq *wq = qp->r_rq.wq; in ipath_compute_aeth() local [all …]
|
D | ipath_ud.c | 60 struct ipath_rwq *wq; in ipath_ud_loopback() local 123 wq = rq->wq; in ipath_ud_loopback() 124 tail = wq->tail; in ipath_ud_loopback() 128 if (unlikely(tail == wq->head)) { in ipath_ud_loopback() 148 wq->tail = tail; in ipath_ud_loopback() 157 n = wq->head; in ipath_ud_loopback()
|
D | ipath_ruc.c | 170 struct ipath_rwq *wq; in ipath_get_rwqe() local 193 wq = rq->wq; in ipath_get_rwqe() 194 tail = wq->tail; in ipath_get_rwqe() 199 if (unlikely(tail == wq->head)) { in ipath_get_rwqe() 213 wq->tail = tail; in ipath_get_rwqe() 224 n = wq->head; in ipath_get_rwqe()
|
D | ipath_verbs.h | 310 struct ipath_rwqe wq[0]; member 314 struct ipath_rwq *wq; member 497 ((char *) rq->wq->wq + in get_rwqe_ptr()
|
/linux-4.1.27/drivers/infiniband/hw/qib/ |
D | qib_srq.c | 52 struct qib_rwq *wq; in qib_post_srq_receive() local 68 wq = srq->rq.wq; in qib_post_srq_receive() 69 next = wq->head + 1; in qib_post_srq_receive() 72 if (next == wq->tail) { in qib_post_srq_receive() 79 wqe = get_rwqe_ptr(&srq->rq, wq->head); in qib_post_srq_receive() 86 wq->head = next; in qib_post_srq_receive() 136 srq->rq.wq = vmalloc_user(sizeof(struct qib_rwq) + srq->rq.size * sz); in qib_create_srq() 137 if (!srq->rq.wq) { in qib_create_srq() 152 srq->rq.wq); in qib_create_srq() 171 srq->rq.wq->head = 0; in qib_create_srq() [all …]
|
D | qib_qp.c | 411 if (qp->r_rq.wq) { in qib_reset_qp() 412 qp->r_rq.wq->head = 0; in qib_reset_qp() 413 qp->r_rq.wq->tail = 0; in qib_reset_qp() 529 if (qp->r_rq.wq) { in qib_error_qp() 530 struct qib_rwq *wq; in qib_error_qp() local 537 wq = qp->r_rq.wq; in qib_error_qp() 538 head = wq->head; in qib_error_qp() 541 tail = wq->tail; in qib_error_qp() 550 wq->tail = tail; in qib_error_qp() 924 struct qib_rwq *wq = qp->r_rq.wq; in qib_compute_aeth() local [all …]
|
D | qib_ruc.c | 142 struct qib_rwq *wq; in qib_get_rwqe() local 165 wq = rq->wq; in qib_get_rwqe() 166 tail = wq->tail; in qib_get_rwqe() 170 if (unlikely(tail == wq->head)) { in qib_get_rwqe() 184 wq->tail = tail; in qib_get_rwqe() 200 n = wq->head; in qib_get_rwqe()
|
D | qib_verbs.h | 371 struct qib_rwqe wq[0]; member 375 struct qib_rwq *wq; member 625 ((char *) rq->wq->wq + in get_rwqe_ptr()
|
/linux-4.1.27/lib/raid6/ |
D | int.uc | 88 unative_t wd$$, wq$$, wp$$, w1$$, w2$$; 95 wq$$ = wp$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE]; 99 w2$$ = MASK(wq$$); 100 w1$$ = SHLBYTE(wq$$); 103 wq$$ = w1$$ ^ wd$$; 106 *(unative_t *)&q[d+NSIZE*$$] = wq$$; 117 unative_t wd$$, wq$$, wp$$, w1$$, w2$$; 125 wq$$ = wp$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE]; 129 w2$$ = MASK(wq$$); 130 w1$$ = SHLBYTE(wq$$); [all …]
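
In these unrolled loops wq$$ accumulates the RAID-6 Q syndrome one data disk at a time: Q = Q·x + D over GF(2^8) with polynomial 0x11d, where SHLBYTE() shifts every byte left by one without crossing byte boundaries and MASK() selects the bytes whose top bit would overflow so they can be reduced by 0x1d; the altivec/neon/tilegx variants below do the same with vector types. A self-contained user-space sketch of that word-at-a-time step, mirroring the int.uc helpers (standalone illustration, not the kernel's code):

#include <stdint.h>
#include <assert.h>

#define NBYTES(x) (0x0101010101010101ULL * (x))

/* 0xff in every byte whose top bit is set, 0x00 elsewhere. */
static uint64_t MASK(uint64_t v)
{
	v &= NBYTES(0x80);
	return (v << 1) - (v >> 7);
}

/* Shift each byte left by one, staying within byte boundaries. */
static uint64_t SHLBYTE(uint64_t v)
{
	return (v << 1) & NBYTES(0xfe);
}

/* One Q-syndrome step: wq = wq * x + wd in GF(2^8), poly 0x11d. */
static uint64_t q_step(uint64_t wq, uint64_t wd)
{
	uint64_t w2 = MASK(wq) & NBYTES(0x1d);	/* conditional reduction */
	uint64_t w1 = SHLBYTE(wq) ^ w2;		/* wq * x, reduced */
	return w1 ^ wd;
}

int main(void)
{
	/* 0x80 * x wraps to 0x1d (0x100 xor 0x11d) in every byte lane. */
	assert(q_step(NBYTES(0x80), 0) == NBYTES(0x1d));
	return 0;
}
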
|
D | tilegx.uc | 56 u64 wd$$, wq$$, wp$$, w1$$, w2$$; 66 wq$$ = wp$$ = *z0ptr++; 70 w2$$ = MASK(wq$$); 71 w1$$ = SHLBYTE(wq$$); 74 wq$$ = w1$$ ^ wd$$; 77 *q++ = wq$$;
|
D | neon.uc | 58 register unative_t wd$$, wq$$, wp$$, w1$$, w2$$; 66 wq$$ = wp$$ = vld1q_u8(&dptr[z0][d+$$*NSIZE]); 70 w2$$ = MASK(wq$$); 71 w1$$ = SHLBYTE(wq$$); 75 wq$$ = veorq_u8(w1$$, wd$$); 78 vst1q_u8(&q[d+NSIZE*$$], wq$$);
|
D | altivec.uc | 74 unative_t wd$$, wq$$, wp$$, w1$$, w2$$; 82 wq$$ = wp$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE]; 86 w2$$ = MASK(wq$$); 87 w1$$ = SHLBYTE(wq$$); 90 wq$$ = vec_xor(w1$$, wd$$); 93 *(unative_t *)&q[d+NSIZE*$$] = wq$$;
|
/linux-4.1.27/Documentation/ |
D | workqueue.txt |
  21 is needed and the workqueue (wq) API is the most commonly used
  37 In the original wq implementation, a multi threaded (MT) wq had one
  38 worker thread per CPU and a single threaded (ST) wq had one worker
  39 thread system-wide. A single MT wq needed to keep around the same
  41 wq users over the years and with the number of CPU cores continuously
  45 Although MT wq wasted a lot of resource, the level of concurrency
  47 MT wq albeit less severe on MT. Each wq maintained its own separate
  48 worker pool. An MT wq could provide only one execution context per CPU
  49 while an ST wq one for the whole system. Work items had to compete for
  55 choosing to use ST wq for polling PIOs and accepting an unnecessary
  [all …]
|
D | padata.txt |
  16 struct padata_instance *padata_alloc(struct workqueue_struct *wq,
  23 The workqueue wq is where the work will actually be done; it should be
  29 struct padata_instance *padata_alloc_possible(struct workqueue_struct *wq);
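
A hedged sketch of bringing up a padata instance with this 4.1-era API (workqueue name and error handling are invented; padata_alloc_possible() spreads both the parallel and the serial-callback work over all possible CPUs, as kernel/padata.c above shows):

#include <linux/padata.h>
#include <linux/workqueue.h>
#include <linux/errno.h>

static struct workqueue_struct *pd_wq;
static struct padata_instance *pinst;

static int demo_padata_init(void)
{
	/* padata submits its parallel work items to this workqueue. */
	pd_wq = alloc_workqueue("demo_padata", WQ_CPU_INTENSIVE, 0);
	if (!pd_wq)
		return -ENOMEM;

	pinst = padata_alloc_possible(pd_wq);
	if (!pinst) {
		destroy_workqueue(pd_wq);
		return -ENOMEM;
	}
	return padata_start(pinst);	/* enable parallel submission */
}
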
|
/linux-4.1.27/drivers/md/bcache/ |
D | closure.h | 151 struct workqueue_struct *wq; member 239 struct workqueue_struct *wq) in set_closure_fn() argument 244 cl->wq = wq; in set_closure_fn() 251 struct workqueue_struct *wq = cl->wq; in closure_queue() local 252 if (wq) { in closure_queue() 254 BUG_ON(!queue_work(wq, &cl->work)); in closure_queue() 379 struct workqueue_struct *wq, in closure_call() argument 383 continue_at_nobarrier(cl, fn, wq); in closure_call()
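
set_closure_fn() records the function and workqueue a closure resumes on; bcache wraps it in continue_at(), which also drops the current reference, so each stage of an I/O pipeline names its successor (request.c and movinggc.c below use exactly this idiom). A hedged sketch with invented stage names:

/* Sketch only: struct closure, continue_at() and closure_return()
 * come from bcache's closure.h; demo_wq is an invented workqueue. */
static struct workqueue_struct *demo_wq;

static void demo_stage2(struct closure *cl)
{
	/* runs later, queued on demo_wq */
	closure_return(cl);		/* pipeline finished */
}

static void demo_stage1(struct closure *cl)
{
	/* ... kick off async work that re-wakes this closure ... */
	continue_at(cl, demo_stage2, demo_wq);
}
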
|
D | movinggc.c | 116 continue_at(cl, write_moving_finish, op->wq); in write_moving() 126 continue_at(cl, write_moving, io->op.wq); in read_moving_submit() 161 io->op.wq = c->moving_gc_wq; in read_moving()
|
D | request.h | 8 struct workqueue_struct *wq; member
|
D | request.c | 91 continue_at(cl, bch_data_insert_start, op->wq); in bch_data_insert_keys() 140 continue_at(cl, bch_data_insert_keys, op->wq); in bch_data_invalidate() 183 set_closure_fn(cl, bch_data_insert_error, op->wq); in bch_data_insert_endio() 219 continue_at(cl, bch_data_insert_keys, op->wq); in bch_data_insert_start() 256 continue_at(cl, bch_data_insert_keys, op->wq); in bch_data_insert_start() 285 continue_at(cl, bch_data_insert_keys, op->wq); in bch_data_insert_start() 663 s->iop.wq = bcache_wq; in search_alloc()
|
/linux-4.1.27/kernel/sched/ |
D | wait.c | 387 __wait_on_bit(wait_queue_head_t *wq, struct wait_bit_queue *q, in __wait_on_bit() argument 393 prepare_to_wait(wq, &q->wait, mode); in __wait_on_bit() 397 finish_wait(wq, &q->wait); in __wait_on_bit() 405 wait_queue_head_t *wq = bit_waitqueue(word, bit); in out_of_line_wait_on_bit() local 408 return __wait_on_bit(wq, &wait, action, mode); in out_of_line_wait_on_bit() 416 wait_queue_head_t *wq = bit_waitqueue(word, bit); in out_of_line_wait_on_bit_timeout() local 420 return __wait_on_bit(wq, &wait, action, mode); in out_of_line_wait_on_bit_timeout() 425 __wait_on_bit_lock(wait_queue_head_t *wq, struct wait_bit_queue *q, in __wait_on_bit_lock() argument 431 prepare_to_wait_exclusive(wq, &q->wait, mode); in __wait_on_bit_lock() 437 abort_exclusive_wait(wq, &q->wait, mode, &q->key); in __wait_on_bit_lock() [all …]
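
The __wait_on_bit*() helpers above back the bit-waitqueue scheme: many flag bits share a small hash of waitqueue heads, looked up with bit_waitqueue(). The usual caller-side pairing is sketched below (the flag word and bit number are invented; note the barrier between clearing the bit and waking):

#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/bitops.h>

#define DEMO_BUSY	0		/* bit number within demo_flags */
static unsigned long demo_flags;

static void demo_wait_idle(void)
{
	/* Sleeps until DEMO_BUSY is clear; hashes to a shared
	 * waitqueue head via bit_waitqueue() internally. */
	wait_on_bit(&demo_flags, DEMO_BUSY, TASK_UNINTERRUPTIBLE);
}

static void demo_release(void)
{
	clear_bit(DEMO_BUSY, &demo_flags);
	smp_mb__after_atomic();	/* order the clear before the wakeup */
	wake_up_bit(&demo_flags, DEMO_BUSY);
}
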
|
/linux-4.1.27/drivers/staging/lustre/lustre/include/ |
D | lustre_lib.h | 531 #define __l_wait_event(wq, condition, info, ret, l_add_wait) \ argument 543 l_add_wait(&wq, &__wait); \ 609 remove_wait_queue(&wq, &__wait); \ 614 #define l_wait_event(wq, condition, info) \ argument 619 __l_wait_event(wq, condition, __info, \ 624 #define l_wait_event_exclusive(wq, condition, info) \ argument 629 __l_wait_event(wq, condition, __info, \ 634 #define l_wait_event_exclusive_head(wq, condition, info) \ argument 639 __l_wait_event(wq, condition, __info, \ 644 #define l_wait_condition(wq, condition) \ argument [all …]
|
/linux-4.1.27/drivers/gpu/drm/radeon/ |
D | radeon_sa.c | 56 init_waitqueue_head(&sa_manager->wq); in radeon_sa_bo_manager_init() 333 spin_lock(&sa_manager->wq.lock); in radeon_sa_bo_new() 345 spin_unlock(&sa_manager->wq.lock); in radeon_sa_bo_new() 355 spin_unlock(&sa_manager->wq.lock); in radeon_sa_bo_new() 359 spin_lock(&sa_manager->wq.lock); in radeon_sa_bo_new() 363 sa_manager->wq, in radeon_sa_bo_new() 370 spin_unlock(&sa_manager->wq.lock); in radeon_sa_bo_new() 386 spin_lock(&sa_manager->wq.lock); in radeon_sa_bo_free() 394 wake_up_all_locked(&sa_manager->wq); in radeon_sa_bo_free() 395 spin_unlock(&sa_manager->wq.lock); in radeon_sa_bo_free() [all …]
|
/linux-4.1.27/drivers/usb/chipidea/ |
D | otg.c | 121 ci->wq = create_freezable_workqueue("ci_otg"); in ci_hdrc_otg_init() 122 if (!ci->wq) { in ci_hdrc_otg_init() 139 if (ci->wq) { in ci_hdrc_otg_destroy() 140 flush_workqueue(ci->wq); in ci_hdrc_otg_destroy() 141 destroy_workqueue(ci->wq); in ci_hdrc_otg_destroy()
|
D | otg.h | 23 queue_work(ci->wq, &ci->work); in ci_otg_queue_work()
|
/linux-4.1.27/drivers/hid/ |
D | hid-elo.c | 35 static struct workqueue_struct *wq; variable 175 queue_delayed_work(wq, &priv->work, ELO_PERIODIC_READ_INTERVAL); in elo_work() 248 queue_delayed_work(wq, &priv->work, ELO_PERIODIC_READ_INTERVAL); in elo_probe() 262 flush_workqueue(wq); in elo_remove() 286 wq = create_singlethread_workqueue("elousb"); in elo_driver_init() 287 if (!wq) in elo_driver_init() 292 destroy_workqueue(wq); in elo_driver_init() 301 destroy_workqueue(wq); in elo_driver_exit()
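
hid-elo shows the common self-rearming poll loop: the work handler re-queues itself with queue_delayed_work(), and teardown must stop the cycle before destroying the queue. A hedged sketch of the pattern with invented names (cancel_delayed_work_sync() is safe against a work item that requeues itself):

#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

static struct workqueue_struct *poll_wq;
static void poll_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(poll_work, poll_fn);

static void poll_fn(struct work_struct *work)
{
	/* ... talk to the device ... then re-arm the next period. */
	queue_delayed_work(poll_wq, &poll_work, msecs_to_jiffies(1000));
}

static int poll_start(void)
{
	poll_wq = create_singlethread_workqueue("demo_poll");
	if (!poll_wq)
		return -ENOMEM;
	queue_delayed_work(poll_wq, &poll_work, 0);	/* first run now */
	return 0;
}

static void poll_stop(void)
{
	cancel_delayed_work_sync(&poll_work);	/* stops the rearm cycle */
	destroy_workqueue(poll_wq);
}
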
|
/linux-4.1.27/drivers/power/ |
D | ipaq_micro_battery.c | 43 struct workqueue_struct *wq; member 91 queue_delayed_work(mb->wq, &mb->update, msecs_to_jiffies(BATT_PERIOD)); in micro_battery_work() 238 mb->wq = create_singlethread_workqueue("ipaq-battery-wq"); in micro_batt_probe() 239 if (!mb->wq) in micro_batt_probe() 244 queue_delayed_work(mb->wq, &mb->update, 1); in micro_batt_probe() 267 destroy_workqueue(mb->wq); in micro_batt_probe() 279 destroy_workqueue(mb->wq); in micro_batt_remove() 296 queue_delayed_work(mb->wq, &mb->update, msecs_to_jiffies(BATT_PERIOD)); in micro_batt_resume()
|
/linux-4.1.27/fs/jfs/ |
D | jfs_lock.h | 35 #define __SLEEP_COND(wq, cond, lock_cmd, unlock_cmd) \ argument 39 add_wait_queue(&wq, &__wait); \ 49 remove_wait_queue(&wq, &__wait); \
|
/linux-4.1.27/net/core/ |
D | stream.c | 31 struct socket_wq *wq; in sk_stream_write_space() local 37 wq = rcu_dereference(sk->sk_wq); in sk_stream_write_space() 38 if (wq_has_sleeper(wq)) in sk_stream_write_space() 39 wake_up_interruptible_poll(&wq->wait, POLLOUT | in sk_stream_write_space() 41 if (wq && wq->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN)) in sk_stream_write_space()
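
sk_stream_write_space() demonstrates the socket wakeup discipline: the sleeper registers via the poll path, and the waker checks wq_has_sleeper() — which contains the pairing memory barrier — so it can skip a useless wake_up when nobody is waiting without ever losing a wakeup. A hedged sketch of the waker side (assuming the same struct socket_wq RCU scheme as above):

#include <net/sock.h>
#include <linux/poll.h>

static void demo_write_space(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))	/* smp_mb() inside pairs with the poller */
		wake_up_interruptible_poll(&wq->wait, POLLOUT);
	rcu_read_unlock();
}
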
|
D | sock.c | 1917 prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait, in __lock_sock() 1925 finish_wait(&sk->sk_lock.wq, &wait); in __lock_sock() 2213 struct socket_wq *wq; in sock_def_wakeup() local 2216 wq = rcu_dereference(sk->sk_wq); in sock_def_wakeup() 2217 if (wq_has_sleeper(wq)) in sock_def_wakeup() 2218 wake_up_interruptible_all(&wq->wait); in sock_def_wakeup() 2224 struct socket_wq *wq; in sock_def_error_report() local 2227 wq = rcu_dereference(sk->sk_wq); in sock_def_error_report() 2228 if (wq_has_sleeper(wq)) in sock_def_error_report() 2229 wake_up_interruptible_poll(&wq->wait, POLLERR); in sock_def_error_report() [all …]
|
/linux-4.1.27/drivers/infiniband/hw/mlx5/ |
D | cq.c | 102 static enum ib_wc_opcode get_umr_comp(struct mlx5_ib_wq *wq, int idx) in get_umr_comp() argument 104 switch (wq->wr_data[idx]) { in get_umr_comp() 121 struct mlx5_ib_wq *wq, int idx) in handle_good_req() argument 160 wc->opcode = get_umr_comp(wq, idx); in handle_good_req() 175 struct mlx5_ib_wq *wq; in handle_responder() local 197 wq = &qp->rq; in handle_responder() 198 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)]; in handle_responder() 199 ++wq->tail; in handle_responder() 410 struct mlx5_ib_wq *wq; in mlx5_poll_one() local 466 wq = &(*cur_qp)->sq; in mlx5_poll_one() [all …]
|
D | mr.c | 372 queue_delayed_work(cache->wq, &ent->dwork, in __cache_work_func() 377 queue_delayed_work(cache->wq, &ent->dwork, in __cache_work_func() 380 queue_work(cache->wq, &ent->work); in __cache_work_func() 388 queue_work(cache->wq, &ent->work); in __cache_work_func() 390 queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ); in __cache_work_func() 438 queue_work(cache->wq, &ent->work); in alloc_cached_mr() 443 queue_work(cache->wq, &ent->work); in alloc_cached_mr() 476 queue_work(cache->wq, &ent->work); in free_cached_mr() 573 cache->wq = create_singlethread_workqueue("mkey_cache"); in mlx5_mr_cache_init() 574 if (!cache->wq) { in mlx5_mr_cache_init() [all …]
|
D | qp.c | 126 struct mlx5_ib_wq *wq = send ? &qp->sq : &qp->rq; in mlx5_ib_read_user_wqe() local 134 if (wq->wqe_cnt == 0) { in mlx5_ib_read_user_wqe() 140 offset = wq->offset + ((wqe_index % wq->wqe_cnt) << wq->wqe_shift); in mlx5_ib_read_user_wqe() 141 wq_end = wq->offset + (wq->wqe_cnt << wq->wqe_shift); in mlx5_ib_read_user_wqe() 161 wqe_length = 1 << wq->wqe_shift; in mlx5_ib_read_user_wqe() 167 ret = ib_umem_copy_from(buffer + first_copy_length, umem, wq->offset, in mlx5_ib_read_user_wqe() 1825 static int mlx5_wq_overflow(struct mlx5_ib_wq *wq, int nreq, struct ib_cq *ib_cq) in mlx5_wq_overflow() argument 1830 cur = wq->head - wq->tail; in mlx5_wq_overflow() 1831 if (likely(cur + nreq < wq->max_post)) in mlx5_wq_overflow() 1836 cur = wq->head - wq->tail; in mlx5_wq_overflow() [all …]
|
/linux-4.1.27/drivers/i2c/busses/ |
D | i2c-taos-evm.c | 38 static DECLARE_WAIT_QUEUE_HEAD(wq); 112 wait_event_interruptible_timeout(wq, taos->state == TAOS_STATE_IDLE, in taos_smbus_xfer() 163 wake_up_interruptible(&wq); in taos_interrupt() 168 wake_up_interruptible(&wq); in taos_interrupt() 175 wake_up_interruptible(&wq); in taos_interrupt() 228 wait_event_interruptible_timeout(wq, taos->state == TAOS_STATE_IDLE, in taos_connect() 250 wait_event_interruptible_timeout(wq, taos->state == TAOS_STATE_IDLE, in taos_connect()
|
D | i2c-ibm_iic.h | 48 wait_queue_head_t wq; member
|
D | i2c-ibm_iic.c | 336 wake_up_interruptible(&dev->wq); in iic_handler() 417 ret = wait_event_interruptible_timeout(dev->wq, in iic_wait_for_tc() 717 init_waitqueue_head(&dev->wq); in iic_probe()
|
/linux-4.1.27/drivers/infiniband/ulp/ipoib/ |
D | ipoib_verbs.c | 164 priv->wq = create_singlethread_workqueue("ipoib_wq"); in ipoib_transport_dev_init() 165 if (!priv->wq) { in ipoib_transport_dev_init() 250 destroy_workqueue(priv->wq); in ipoib_transport_dev_init() 251 priv->wq = NULL; in ipoib_transport_dev_init() 282 if (priv->wq) { in ipoib_transport_dev_cleanup() 283 flush_workqueue(priv->wq); in ipoib_transport_dev_cleanup() 284 destroy_workqueue(priv->wq); in ipoib_transport_dev_cleanup() 285 priv->wq = NULL; in ipoib_transport_dev_cleanup()
|
D | ipoib_multicast.c | 97 queue_delayed_work(priv->wq, &priv->mcast_task, 0); in __ipoib_mcast_schedule_join_thread() 104 queue_delayed_work(priv->wq, &priv->mcast_task, HZ); in __ipoib_mcast_schedule_join_thread() 106 queue_delayed_work(priv->wq, &priv->mcast_task, 0); in __ipoib_mcast_schedule_join_thread() 391 queue_work(priv->wq, &priv->carrier_on_task); in ipoib_mcast_join_complete() 612 queue_delayed_work(priv->wq, &priv->mcast_task, in ipoib_mcast_join_task() 649 flush_workqueue(priv->wq); in ipoib_mcast_stop_thread()
|
D | ipoib_cm.c | 477 queue_delayed_work(priv->wq, in ipoib_cm_req_handler() 579 queue_work(priv->wq, &priv->cm.rx_reap_task); in ipoib_cm_handle_rx_wc() 606 queue_work(priv->wq, &priv->cm.rx_reap_task); in ipoib_cm_handle_rx_wc() 830 queue_work(priv->wq, &priv->cm.reap_task); in ipoib_cm_handle_tx_wc() 1258 queue_work(priv->wq, &priv->cm.reap_task); in ipoib_cm_tx_handler() 1287 queue_work(priv->wq, &priv->cm.start_task); in ipoib_cm_create_tx() 1298 queue_work(priv->wq, &priv->cm.reap_task); in ipoib_cm_destroy_tx() 1420 queue_work(priv->wq, &priv->cm.skb_task); in ipoib_cm_skb_too_long() 1453 queue_delayed_work(priv->wq, in ipoib_cm_stale_task()
|
/linux-4.1.27/drivers/mtd/chips/ |
D | cfi_cmdset_0020.c | 159 init_waitqueue_head(&(cfi->chips[i].wq)); in cfi_cmdset_0020() 297 wake_up(&chip->wq); in do_read_onechip() 352 add_wait_queue(&chip->wq, &wait); in do_read_onechip() 355 remove_wait_queue(&chip->wq, &wait); in do_read_onechip() 377 wake_up(&chip->wq); in do_read_onechip() 485 add_wait_queue(&chip->wq, &wait); in do_write_buffer() 488 remove_wait_queue(&chip->wq, &wait); in do_write_buffer() 542 add_wait_queue(&chip->wq, &wait); in do_write_buffer() 545 remove_wait_queue(&chip->wq, &wait); in do_write_buffer() 595 wake_up(&chip->wq); in do_write_buffer() [all …]
|
D | cfi_cmdset_0002.c | 666 init_waitqueue_head(&(cfi->chips[i].wq)); in cfi_cmdset_0002() 872 add_wait_queue(&chip->wq, &wait); in get_chip() 875 remove_wait_queue(&chip->wq, &wait); in get_chip() 907 wake_up(&chip->wq); in put_chip() 1022 add_wait_queue(&chip->wq, &wait); in xip_udelay() 1025 remove_wait_queue(&chip->wq, &wait); in xip_udelay() 1222 add_wait_queue(&chip->wq, &wait); in do_read_secsi_onechip() 1227 remove_wait_queue(&chip->wq, &wait); in do_read_secsi_onechip() 1241 wake_up(&chip->wq); in do_read_secsi_onechip() 1619 add_wait_queue(&chip->wq, &wait); in do_write_oneword() [all …]
|
D | cfi_cmdset_0001.c | 573 init_waitqueue_head(&(cfi->chips[i].wq)); in cfi_cmdset_0001() 762 init_waitqueue_head(&chip->wq); in cfi_intelext_partition_fixup() 883 add_wait_queue(&chip->wq, &wait); in chip_ready() 886 remove_wait_queue(&chip->wq, &wait); in chip_ready() 967 add_wait_queue(&chip->wq, &wait); in get_chip() 970 remove_wait_queue(&chip->wq, &wait); in get_chip() 1007 wake_up(&chip->wq); in put_chip() 1021 wake_up(&chip->wq); in put_chip() 1056 wake_up(&chip->wq); in put_chip() 1187 add_wait_queue(&chip->wq, &wait); in xip_wait_for_operation() [all …]
|
/linux-4.1.27/drivers/infiniband/hw/mlx4/ |
D | cq.c | 601 struct mlx4_ib_wq *wq; in mlx4_ib_qp_sw_comp() local 605 wq = is_send ? &qp->sq : &qp->rq; in mlx4_ib_qp_sw_comp() 606 cur = wq->head - wq->tail; in mlx4_ib_qp_sw_comp() 612 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)]; in mlx4_ib_qp_sw_comp() 615 wq->tail++; in mlx4_ib_qp_sw_comp() 653 struct mlx4_ib_wq *wq; in mlx4_ib_poll_one() local 740 wq = &(*cur_qp)->sq; in mlx4_ib_poll_one() 743 wq->tail += (u16) (wqe_ctr - (u16) wq->tail); in mlx4_ib_poll_one() 745 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)]; in mlx4_ib_poll_one() 746 ++wq->tail; in mlx4_ib_poll_one() [all …]
|
D | alias_GUID.c | 433 queue_delayed_work(dev->sriov.alias_guid.ports_guid[port_index].wq, in aliasguid_query_handler() 565 queue_delayed_work(dev->sriov.alias_guid.ports_guid[port - 1].wq, in set_guid_rec() 627 queue_delayed_work(dev->sriov.alias_guid.ports_guid[port - 1].wq, in mlx4_ib_invalidate_all_guid_record() 786 queue_delayed_work(dev->sriov.alias_guid.ports_guid[port].wq, in mlx4_ib_init_alias_guid_work() 822 flush_workqueue(dev->sriov.alias_guid.ports_guid[i].wq); in mlx4_ib_destroy_alias_guid_service() 823 destroy_workqueue(dev->sriov.alias_guid.ports_guid[i].wq); in mlx4_ib_destroy_alias_guid_service() 878 dev->sriov.alias_guid.ports_guid[i].wq = in mlx4_ib_init_alias_guid_service() 880 if (!dev->sriov.alias_guid.ports_guid[i].wq) { in mlx4_ib_init_alias_guid_service() 891 destroy_workqueue(dev->sriov.alias_guid.ports_guid[i].wq); in mlx4_ib_init_alias_guid_service() 892 dev->sriov.alias_guid.ports_guid[i].wq = NULL; in mlx4_ib_init_alias_guid_service()
|
D | mad.c | 1113 queue_work(ctx->wq, &ctx->work); in mlx4_ib_tunnel_comp_handler() 1847 ctx->wq = to_mdev(ibdev)->sriov.demux[port - 1].wq; in create_pv_resources() 1858 ctx->wq = NULL; in create_pv_resources() 1899 flush_workqueue(ctx->wq); in destroy_pv_resources() 1998 ctx->wq = create_singlethread_workqueue(name); in mlx4_ib_alloc_demux_ctx() 1999 if (!ctx->wq) { in mlx4_ib_alloc_demux_ctx() 2016 destroy_workqueue(ctx->wq); in mlx4_ib_alloc_demux_ctx() 2017 ctx->wq = NULL; in mlx4_ib_alloc_demux_ctx() 2033 flush_workqueue(sqp_ctx->wq); in mlx4_ib_free_sqp_ctx() 2064 flush_workqueue(ctx->wq); in mlx4_ib_free_demux_ctx() [all …]
|
/linux-4.1.27/drivers/staging/i2o/ |
D | exec-osm.c | 49 wait_queue_head_t *wq; /* Pointer to Wait queue */ member 125 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); in i2o_msg_post_wait_mem() 152 wait->wq = &wq; in i2o_msg_post_wait_mem() 165 wait_event_interruptible_timeout(wq, wait->complete, timeout * HZ); in i2o_msg_post_wait_mem() 169 wait->wq = NULL; in i2o_msg_post_wait_mem() 240 if (wait->wq) in i2o_msg_post_wait_complete() 257 wake_up_interruptible(wait->wq); in i2o_msg_post_wait_complete()
|
/linux-4.1.27/net/sunrpc/ |
D | svcsock.c | 408 wait_queue_head_t *wq = sk_sleep(sk); in svc_udp_data_ready() local 417 if (wq && waitqueue_active(wq)) in svc_udp_data_ready() 418 wake_up_interruptible(wq); in svc_udp_data_ready() 427 wait_queue_head_t *wq = sk_sleep(sk); in svc_write_space() local 435 if (wq && waitqueue_active(wq)) { in svc_write_space() 438 wake_up_interruptible(wq); in svc_write_space() 766 wait_queue_head_t *wq; in svc_tcp_listen_data_ready() local 789 wq = sk_sleep(sk); in svc_tcp_listen_data_ready() 790 if (wq && waitqueue_active(wq)) in svc_tcp_listen_data_ready() 791 wake_up_interruptible_all(wq); in svc_tcp_listen_data_ready() [all …]
|
D | sched.c | 289 wait_queue_head_t *wq = bit_waitqueue(m, RPC_TASK_ACTIVE); in rpc_complete_task() local 296 spin_lock_irqsave(&wq->lock, flags); in rpc_complete_task() 299 if (waitqueue_active(wq)) in rpc_complete_task() 300 __wake_up_locked_key(wq, TASK_NORMAL, &k); in rpc_complete_task() 301 spin_unlock_irqrestore(&wq->lock, flags); in rpc_complete_task() 1066 struct workqueue_struct *wq; in rpciod_start() local 1073 wq = alloc_workqueue("rpciod", WQ_MEM_RECLAIM | WQ_HIGHPRI, 0); in rpciod_start() 1074 rpciod_workqueue = wq; in rpciod_start() 1080 struct workqueue_struct *wq = NULL; in rpciod_stop() local 1086 wq = rpciod_workqueue; in rpciod_stop() [all …]
|
/linux-4.1.27/include/trace/events/ |
D | btrfs.h | 997 __field( void *, wq ) 1006 __entry->wq = work->wq; 1015 __entry->work, __entry->normal_work, __entry->wq, 1067 TP_PROTO(struct __btrfs_workqueue *wq, const char *name, int high), 1069 TP_ARGS(wq, name, high), 1072 __field( void *, wq ) 1078 __entry->wq = wq; 1086 __entry->wq) 1091 TP_PROTO(struct __btrfs_workqueue *wq, const char *name, int high), 1093 TP_ARGS(wq, name, high) [all …]
|
D | workqueue.h | 55 __entry->workqueue = pwq->wq;
|
/linux-4.1.27/drivers/thunderbolt/ |
D | tb.c | 296 queue_work(tb->wq, &ev->work); in tb_schedule_hotplug_handler() 332 if (tb->wq) { in thunderbolt_shutdown_and_free() 333 flush_workqueue(tb->wq); in thunderbolt_shutdown_and_free() 334 destroy_workqueue(tb->wq); in thunderbolt_shutdown_and_free() 335 tb->wq = NULL; in thunderbolt_shutdown_and_free() 366 tb->wq = alloc_ordered_workqueue("thunderbolt", 0); in thunderbolt_alloc_and_start() 367 if (!tb->wq) in thunderbolt_alloc_and_start()
|
/linux-4.1.27/drivers/usb/misc/ |
D | appledisplay.c | 88 static struct workqueue_struct *wq; variable 125 queue_delayed_work(wq, &pdata->work, 0); in appledisplay_complete() 368 wq = create_singlethread_workqueue("appledisplay"); in appledisplay_init() 369 if (!wq) { in appledisplay_init() 379 flush_workqueue(wq); in appledisplay_exit() 380 destroy_workqueue(wq); in appledisplay_exit()
|
/linux-4.1.27/drivers/infiniband/hw/mthca/ |
D | mthca_cq.c | 489 struct mthca_wq *wq; in mthca_poll_one() local 539 wq = &(*cur_qp)->sq; in mthca_poll_one() 541 >> wq->wqe_shift); in mthca_poll_one() 547 wq = NULL; in mthca_poll_one() 553 wq = &(*cur_qp)->rq; in mthca_poll_one() 555 wqe_index = wqe >> wq->wqe_shift; in mthca_poll_one() 562 wqe_index = wq->max - 1; in mthca_poll_one() 566 if (wq) { in mthca_poll_one() 567 if (wq->last_comp < wqe_index) in mthca_poll_one() 568 wq->tail += wqe_index - wq->last_comp; in mthca_poll_one() [all …]
|
D | mthca_qp.c | 229 static void mthca_wq_reset(struct mthca_wq *wq) in mthca_wq_reset() argument 231 wq->next_ind = 0; in mthca_wq_reset() 232 wq->last_comp = wq->max - 1; in mthca_wq_reset() 233 wq->head = 0; in mthca_wq_reset() 234 wq->tail = 0; in mthca_wq_reset() 1545 static inline int mthca_wq_overflow(struct mthca_wq *wq, int nreq, in mthca_wq_overflow() argument 1551 cur = wq->head - wq->tail; in mthca_wq_overflow() 1552 if (likely(cur + nreq < wq->max)) in mthca_wq_overflow() 1557 cur = wq->head - wq->tail; in mthca_wq_overflow() 1560 return cur + nreq >= wq->max; in mthca_wq_overflow()
|
/linux-4.1.27/drivers/nfc/ |
D | nfcsim.c | 63 static struct workqueue_struct *wq; variable 211 queue_delayed_work(wq, &dev->poll_work, 0); in nfcsim_start_poll() 326 queue_delayed_work(wq, &peer->recv_work, msecs_to_jiffies(5)); in nfcsim_tx() 431 queue_delayed_work(wq, &dev->poll_work, msecs_to_jiffies(200)); in nfcsim_wq_poll() 491 wq = alloc_ordered_workqueue("nfcsim", 0); in nfcsim_init() 492 if (!wq) { in nfcsim_init() 533 destroy_workqueue(wq); in nfcsim_exit()
|
D | pn533.c | 357 struct workqueue_struct *wq; member 749 queue_work(dev->wq, &dev->cmd_complete_work); in pn533_recv_response() 803 queue_work(dev->wq, &dev->cmd_complete_work); in pn533_recv_ack() 1066 queue_work(dev->wq, &dev->cmd_work); in pn533_wq_cmd_complete() 1651 queue_work(dev->wq, &dev->mi_tm_rx_work); in pn533_tm_get_data_complete() 1733 queue_work(dev->wq, &dev->cmd_work); in pn533_wq_tm_mi_send() 1794 queue_work(dev->wq, &dev->tg_work); in pn533_init_target_complete() 1809 queue_delayed_work(dev->wq, &dev->poll_work, in pn533_listen_mode_timer() 1828 queue_delayed_work(dev->wq, &dev->poll_work, in pn533_rf_complete() 1876 queue_work(dev->wq, &dev->rf_work); in pn533_poll_dep_complete() [all …]
|
/linux-4.1.27/drivers/media/pci/ddbridge/ |
D | ddbridge.h | 85 wait_queue_head_t wq; member 113 wait_queue_head_t wq; member 132 wait_queue_head_t wq; member
|
D | ddbridge-core.c | 89 stat = wait_event_timeout(i2c->wq, i2c->done == 1, HZ); in ddb_i2c_cmd() 186 init_waitqueue_head(&i2c->wq); in ddb_i2c_init() 911 output->wq, ddb_output_free(output) >= 188) < 0) in ts_write() 938 input->wq, ddb_input_avail(input) >= 188) < 0) in ts_read() 1019 wake_up(&input->wq); in input_tasklet() 1034 wake_up(&output->wq); in output_tasklet() 1216 init_waitqueue_head(&input->wq); in ddb_input_init() 1232 init_waitqueue_head(&output->wq); in ddb_output_init() 1280 wake_up(&i2c->wq); in irq_handle_i2c()
|
/linux-4.1.27/drivers/media/i2c/ |
D | saa7110.c | 63 wait_queue_head_t wq; member 199 prepare_to_wait(&decoder->wq, &wait, TASK_UNINTERRUPTIBLE); in determine_norm() 201 finish_wait(&decoder->wq, &wait); in determine_norm() 234 prepare_to_wait(&decoder->wq, &wait, TASK_UNINTERRUPTIBLE); in determine_norm() 236 finish_wait(&decoder->wq, &wait); in determine_norm() 426 init_waitqueue_head(&decoder->wq); in saa7110_probe()
|
D | msp3400-driver.c | 323 wake_up_interruptible(&state->wq); in msp_wake_thread() 330 add_wait_queue(&state->wq, &wait); in msp_sleep() 341 remove_wait_queue(&state->wq, &wait); in msp_sleep() 712 init_waitqueue_head(&state->wq); in msp_probe()
|
D | msp3400-driver.h | 102 wait_queue_head_t wq; member
|
/linux-4.1.27/crypto/ |
D | algif_aead.c | 100 struct socket_wq *wq; in aead_wmem_wakeup() local 106 wq = rcu_dereference(sk->sk_wq); in aead_wmem_wakeup() 107 if (wq_has_sleeper(wq)) in aead_wmem_wakeup() 108 wake_up_interruptible_sync_poll(&wq->wait, POLLIN | in aead_wmem_wakeup() 149 struct socket_wq *wq; in aead_data_wakeup() local 157 wq = rcu_dereference(sk->sk_wq); in aead_data_wakeup() 158 if (wq_has_sleeper(wq)) in aead_data_wakeup() 159 wake_up_interruptible_sync_poll(&wq->wait, POLLOUT | in aead_data_wakeup()
|
D | pcrypt.c | 34 struct workqueue_struct *wq; member 458 pcrypt->wq = alloc_workqueue("%s", WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, in pcrypt_init_padata() 460 if (!pcrypt->wq) in pcrypt_init_padata() 463 pcrypt->pinst = padata_alloc_possible(pcrypt->wq); in pcrypt_init_padata() 499 destroy_workqueue(pcrypt->wq); in pcrypt_init_padata() 513 destroy_workqueue(pcrypt->wq); in pcrypt_fini_padata()
|
D | algif_skcipher.c | 239 struct socket_wq *wq; in skcipher_wmem_wakeup() local 245 wq = rcu_dereference(sk->sk_wq); in skcipher_wmem_wakeup() 246 if (wq_has_sleeper(wq)) in skcipher_wmem_wakeup() 247 wake_up_interruptible_sync_poll(&wq->wait, POLLIN | in skcipher_wmem_wakeup() 289 struct socket_wq *wq; in skcipher_data_wakeup() local 295 wq = rcu_dereference(sk->sk_wq); in skcipher_data_wakeup() 296 if (wq_has_sleeper(wq)) in skcipher_data_wakeup() 297 wake_up_interruptible_sync_poll(&wq->wait, POLLOUT | in skcipher_data_wakeup()
|
/linux-4.1.27/drivers/mtd/lpddr/ |
D | lpddr_cmds.c | 101 init_waitqueue_head(&chip->wq); in lpddr_cmdset() 159 add_wait_queue(&chip->wq, &wait); in wait_for_ready() 162 remove_wait_queue(&chip->wq, &wait); in wait_for_ready() 258 add_wait_queue(&chip->wq, &wait); in get_chip() 261 remove_wait_queue(&chip->wq, &wait); in get_chip() 325 add_wait_queue(&chip->wq, &wait); in chip_ready() 328 remove_wait_queue(&chip->wq, &wait); in chip_ready() 351 wake_up(&chip->wq); in put_chip() 365 wake_up(&chip->wq); in put_chip() 386 wake_up(&chip->wq); in put_chip()
|
/linux-4.1.27/arch/x86/kernel/ |
D | kvm.c | 94 wait_queue_head_t wq; member 144 init_waitqueue_head(&n.wq); in kvm_async_pf_task_wait() 150 prepare_to_wait(&n.wq, &wait, TASK_UNINTERRUPTIBLE); in kvm_async_pf_task_wait() 169 finish_wait(&n.wq, &wait); in kvm_async_pf_task_wait() 181 else if (waitqueue_active(&n->wq)) in apf_task_wake_one() 182 wake_up(&n->wq); in apf_task_wake_one() 234 init_waitqueue_head(&n->wq); in kvm_async_pf_task_wake()
|
/linux-4.1.27/drivers/pci/hotplug/ |
D | pciehp_ctrl.c | 52 queue_work(p_slot->wq, &info->work); in queue_interrupt_event() 361 queue_work(p_slot->wq, &info->work); in pciehp_queue_pushbutton_work() 389 queue_delayed_work(p_slot->wq, &p_slot->work, 5*HZ); in handle_button_press_event() 450 queue_work(p_slot->wq, &info->work); in handle_surprise_event() 479 queue_work(p_slot->wq, &info->work); in handle_link_event() 492 queue_work(p_slot->wq, &info->work); in handle_link_event() 501 queue_work(p_slot->wq, &info->work); in handle_link_event()
|
D | shpchp_core.c | 131 slot->wq = alloc_workqueue("shpchp-%d", 0, 0, slot->number); in init_slots() 132 if (!slot->wq) { in init_slots() 168 destroy_workqueue(slot->wq); in init_slots() 189 destroy_workqueue(slot->wq); in cleanup_slots()
|
D | shpchp_ctrl.c | 54 queue_work(p_slot->wq, &info->work); in queue_interrupt_event() 458 queue_work(p_slot->wq, &info->work); in shpchp_queue_pushbutton_work() 506 queue_delayed_work(p_slot->wq, &p_slot->work, 5*HZ); in handle_button_press_event()
|
/linux-4.1.27/drivers/staging/android/ |
D | sync.c | 169 init_waitqueue_head(&fence->wq); in sync_fence_alloc() 187 wake_up_all(&fence->wq); in fence_check_cb_func() 342 spin_lock_irqsave(&fence->wq.lock, flags); in sync_fence_wait_async() 345 __add_wait_queue_tail(&fence->wq, &waiter->work); in sync_fence_wait_async() 346 spin_unlock_irqrestore(&fence->wq.lock, flags); in sync_fence_wait_async() 361 spin_lock_irqsave(&fence->wq.lock, flags); in sync_fence_cancel_async() 366 spin_unlock_irqrestore(&fence->wq.lock, flags); in sync_fence_cancel_async() 384 ret = wait_event_interruptible_timeout(fence->wq, in sync_fence_wait() 549 poll_wait(file, &fence->wq, wait); in sync_fence_poll()
|
D | sync_debug.c | 163 spin_lock_irqsave(&fence->wq.lock, flags); in sync_print_fence() 164 list_for_each_entry(pos, &fence->wq.task_list, task_list) { in sync_print_fence() 174 spin_unlock_irqrestore(&fence->wq.lock, flags); in sync_print_fence()
|
D | sync.h | 163 wait_queue_head_t wq; member
|
/linux-4.1.27/arch/arm/kvm/ |
D | psci.c | 71 wait_queue_head_t *wq; in kvm_psci_vcpu_on() local 119 wq = kvm_arch_vcpu_wq(vcpu); in kvm_psci_vcpu_on() 120 wake_up_interruptible(wq); in kvm_psci_vcpu_on()
|
/linux-4.1.27/Documentation/driver-model/ |
D | design-patterns.txt |
  73 struct workqueue_struct *wq;
  89 queue_work(foo->wq, &foo->offload);
  97 foo->wq = create_singlethread_workqueue("foo-wq");
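
Filled out, the pattern this document excerpts reads roughly as below: a per-device struct owning its workqueue, an atomic-context handler deferring into it, and symmetric teardown. This is a hedged completion of the excerpt; the probe/remove/irq framing is illustrative, not from the document:

#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/errno.h>

struct foo {
	struct workqueue_struct *wq;
	struct work_struct offload;
};

static void foo_work(struct work_struct *work)
{
	struct foo *foo = container_of(work, struct foo, offload);
	/* slow, sleepable processing deferred from the interrupt */
}

static irqreturn_t foo_handler(int irq, void *arg)	/* atomic context */
{
	struct foo *foo = arg;

	queue_work(foo->wq, &foo->offload);
	return IRQ_HANDLED;
}

static int foo_probe(struct foo *foo)
{
	foo->wq = create_singlethread_workqueue("foo-wq");
	if (!foo->wq)
		return -ENOMEM;
	INIT_WORK(&foo->offload, foo_work);
	return 0;
}

static void foo_remove(struct foo *foo)
{
	destroy_workqueue(foo->wq);	/* drains pending work first */
}
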
|
/linux-4.1.27/fs/nfs/blocklayout/ |
D | rpc_pipefs.c | 62 DECLARE_WAITQUEUE(wq, current); in bl_resolve_deviceid() 87 add_wait_queue(&nn->bl_wq, &wq); in bl_resolve_deviceid() 90 remove_wait_queue(&nn->bl_wq, &wq); in bl_resolve_deviceid() 96 remove_wait_queue(&nn->bl_wq, &wq); in bl_resolve_deviceid()
|
/linux-4.1.27/drivers/net/wireless/cw1200/ |
D | cw1200_spi.c | 43 wait_queue_head_t wq; member 205 add_wait_queue(&self->wq, &wait); in cw1200_spi_lock() 218 remove_wait_queue(&self->wq, &wait); in cw1200_spi_lock() 230 wake_up(&self->wq); in cw1200_spi_unlock() 413 init_waitqueue_head(&self->wq); in cw1200_spi_probe()
|
/linux-4.1.27/fs/ |
D | eventpoll.c | 193 wait_queue_head_t wq; member 506 static void ep_poll_safewake(wait_queue_head_t *wq) in ep_poll_safewake() argument 511 ep_poll_wakeup_proc, NULL, wq, (void *) (long) this_cpu); in ep_poll_safewake() 665 if (waitqueue_active(&ep->wq)) in ep_scan_ready_list() 666 wake_up_locked(&ep->wq); in ep_scan_ready_list() 949 init_waitqueue_head(&ep->wq); in ep_alloc() 1069 if (waitqueue_active(&ep->wq)) in ep_poll_callback() 1070 wake_up_locked(&ep->wq); in ep_poll_callback() 1343 if (waitqueue_active(&ep->wq)) in ep_insert() 1344 wake_up_locked(&ep->wq); in ep_insert() [all …]
|
D | inode.c | 1774 wait_queue_head_t *wq; in __wait_on_freeing_inode() local 1776 wq = bit_waitqueue(&inode->i_state, __I_NEW); in __wait_on_freeing_inode() 1777 prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE); in __wait_on_freeing_inode() 1781 finish_wait(wq, &wait.wait); in __wait_on_freeing_inode() 1920 wait_queue_head_t *wq = bit_waitqueue(&inode->i_state, __I_DIO_WAKEUP); in __inode_dio_wait() local 1924 prepare_to_wait(wq, &q.wait, TASK_UNINTERRUPTIBLE); in __inode_dio_wait() 1928 finish_wait(wq, &q.wait); in __inode_dio_wait()
|
/linux-4.1.27/drivers/sbus/char/ |
D | bbc_i2c.c | 129 add_wait_queue(&bp->wq, &wait); in wait_for_pin() 134 bp->wq, in wait_for_pin() 143 remove_wait_queue(&bp->wq, &wait); in wait_for_pin() 279 wake_up_interruptible(&bp->wq); in bbc_i2c_interrupt() 317 init_waitqueue_head(&bp->wq); in attach_one_i2c()
|
D | bbc_i2c.h | 61 wait_queue_head_t wq; member
|
/linux-4.1.27/drivers/staging/rtl8192e/rtl8192e/ |
D | rtl_ps.c | 69 queue_delayed_work_rsl(priv->rtllib->wq, in rtl8192_hw_wakeup() 115 queue_delayed_work_rsl(priv->rtllib->wq, in rtl8192_hw_to_sleep() 117 queue_delayed_work_rsl(priv->rtllib->wq, in rtl8192_hw_to_sleep() 208 queue_work_rsl(priv->rtllib->wq, in rtllib_ips_leave_wq()
|
/linux-4.1.27/drivers/gpu/drm/ |
D | drm_flip_work.c | 99 struct workqueue_struct *wq) in drm_flip_work_commit() argument 107 queue_work(wq, &work->worker); in drm_flip_work_commit()
|
/linux-4.1.27/fs/nfs/ |
D | callback.c | 123 DEFINE_WAIT(wq); in nfs41_callback_svc() 131 prepare_to_wait(&serv->sv_cb_waitq, &wq, TASK_INTERRUPTIBLE); in nfs41_callback_svc() 138 finish_wait(&serv->sv_cb_waitq, &wq); in nfs41_callback_svc() 146 finish_wait(&serv->sv_cb_waitq, &wq); in nfs41_callback_svc()
|
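nfs41_callback_svc() is the service-kthread shape: park on the queue with prepare_to_wait(), drain work when it is present, and call finish_wait() on every path out (lines 138 and 146). A sketch; have_work() and process_one() are hypothetical queue accessors:

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/wait.h>

bool have_work(void);               /* hypothetical queue predicate */
void process_one(void);

static int service_thread(void *data)
{
    wait_queue_head_t *waitq = data;
    DEFINE_WAIT(wq);

    while (!kthread_should_stop()) {
        prepare_to_wait(waitq, &wq, TASK_INTERRUPTIBLE);
        if (have_work()) {
            finish_wait(waitq, &wq);
            process_one();          /* drain outside the wait */
        } else {
            schedule();
            finish_wait(waitq, &wq);
        }
    }
    return 0;
}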
D | inode.c | 1950 struct workqueue_struct *wq; in nfsiod_start() local 1952 wq = alloc_workqueue("nfsiod", WQ_MEM_RECLAIM, 0); in nfsiod_start() 1953 if (wq == NULL) in nfsiod_start() 1955 nfsiod_workqueue = wq; in nfsiod_start() 1964 struct workqueue_struct *wq; in nfsiod_stop() local 1966 wq = nfsiod_workqueue; in nfsiod_stop() 1967 if (wq == NULL) in nfsiod_stop() 1970 destroy_workqueue(wq); in nfsiod_stop()
|
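nfsiod_start()/nfsiod_stop() show the module-lifetime workqueue pattern; WQ_MEM_RECLAIM reserves a rescuer thread so queued work keeps making progress under memory pressure, which writeback-path queues require. A minimal equivalent with stand-in names:

#include <linux/errno.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_iod_wq;  /* stands in for nfsiod_workqueue */

static int my_iod_start(void)
{
    struct workqueue_struct *wq;

    wq = alloc_workqueue("my-iod", WQ_MEM_RECLAIM, 0);
    if (wq == NULL)
        return -ENOMEM;
    my_iod_wq = wq;
    return 0;
}

static void my_iod_stop(void)
{
    struct workqueue_struct *wq = my_iod_wq;

    if (wq == NULL)
        return;
    my_iod_wq = NULL;
    destroy_workqueue(wq);          /* implies a final flush */
}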
/linux-4.1.27/drivers/gpu/host1x/ |
D | intr.c | 124 wait_queue_head_t *wq = waiter->data; in action_wakeup() local 125 wake_up(wq); in action_wakeup() 130 wait_queue_head_t *wq = waiter->data; in action_wakeup_interruptible() local 131 wake_up_interruptible(wq); in action_wakeup_interruptible()
|
D | syncpt.c | 191 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); in host1x_syncpt_wait() 230 &wq, waiter, &ref); in host1x_syncpt_wait() 242 int remain = wait_event_interruptible_timeout(wq, in host1x_syncpt_wait()
|
D | cdma.h | 53 struct delayed_work wq; /* work queue */ member
|
D | cdma.c | 190 schedule_delayed_work(&cdma->timeout.wq, in cdma_start_timer_locked() 200 cancel_delayed_work(&cdma->timeout.wq); in stop_cdma_timer_locked()
|
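Across these host1x files, wq names two different things: plain wait-queue heads woken from syncpoint interrupts (intr.c, syncpt.c), and a delayed_work used as a submission timeout (cdma.h/cdma.c: armed at submit, cancelled at completion). A generic sketch of the timeout half:

#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct channel {
    struct delayed_work wq;         /* timeout, named as in cdma.h */
};

static void channel_timeout(struct work_struct *work)
{
    struct channel *ch = container_of(to_delayed_work(work),
                                      struct channel, wq);
    /* expiry means a hung job: dump state, reset the channel */
    (void)ch;
}

static void channel_init(struct channel *ch)
{
    INIT_DELAYED_WORK(&ch->wq, channel_timeout);
}

static void channel_job_submitted(struct channel *ch, unsigned int ms)
{
    schedule_delayed_work(&ch->wq, msecs_to_jiffies(ms));
}

static void channel_job_done(struct channel *ch)
{
    cancel_delayed_work(&ch->wq);   /* use the _sync variant on teardown */
}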
/linux-4.1.27/drivers/iommu/ |
D | amd_iommu_v2.c | 59 wait_queue_head_t wq; /* To wait for count == 0 */ member 74 wait_queue_head_t wq; member 151 wake_up(&dev_state->wq); in put_device_state() 264 wake_up(&pasid_state->wq); in put_pasid_state() 270 wait_event(pasid_state->wq, !atomic_read(&pasid_state->count)); in put_pasid_state_wait() 636 init_waitqueue_head(&pasid_state->wq); in amd_iommu_bind_pasid() 760 init_waitqueue_head(&dev_state->wq); in amd_iommu_init_device() 848 wait_event(dev_state->wq, !atomic_read(&dev_state->count)); in amd_iommu_free_device()
|
/linux-4.1.27/fs/xfs/ |
D | xfs_log_priv.h | 550 static inline void xlog_wait(wait_queue_head_t *wq, spinlock_t *lock) in xlog_wait() argument 554 add_wait_queue_exclusive(wq, &wait); in xlog_wait() 558 remove_wait_queue(wq, &wait); in xlog_wait()
|
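The xlog_wait() hits show the head and two body lines of an exclusive-waiter helper; the elided lines follow the standard ordering, queue the task and set its state before dropping the caller's lock, so a wakeup cannot slip through the gap. A reconstruction from the visible lines and that idiom:

#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

static inline void xlog_wait(wait_queue_head_t *wq, spinlock_t *lock)
        __releases(lock)
{
    DECLARE_WAITQUEUE(wait, current);

    add_wait_queue_exclusive(wq, &wait);    /* line 554 */
    __set_current_state(TASK_UNINTERRUPTIBLE);
    spin_unlock(lock);                      /* only after queueing */
    schedule();
    remove_wait_queue(wq, &wait);           /* line 558 */
}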
/linux-4.1.27/drivers/gpu/drm/tilcdc/ |
D | tilcdc_drv.c | 144 flush_workqueue(priv->wq); in tilcdc_unload() 145 destroy_workqueue(priv->wq); in tilcdc_unload() 174 priv->wq = alloc_ordered_workqueue("tilcdc", 0); in tilcdc_load() 175 if (!priv->wq) { in tilcdc_load() 326 flush_workqueue(priv->wq); in tilcdc_load() 327 destroy_workqueue(priv->wq); in tilcdc_load()
|
D | tilcdc_drv.h | 77 struct workqueue_struct *wq; member
|
/linux-4.1.27/drivers/target/tcm_fc/ |
D | tfc_conf.c | 303 struct workqueue_struct *wq; in ft_add_tpg() local 334 wq = alloc_workqueue("tcm_fc", 0, 1); in ft_add_tpg() 335 if (!wq) { in ft_add_tpg() 343 destroy_workqueue(wq); in ft_add_tpg() 347 tpg->workqueue = wq; in ft_add_tpg()
|
/linux-4.1.27/fs/logfs/ |
D | dev_bdev.c | 54 static DECLARE_WAIT_QUEUE_HEAD(wq); 73 wake_up(&wq); in writeseg_end_io() 167 wake_up(&wq); in erase_end_io() 246 wait_event(wq, atomic_read(&super->s_pending_writes) == 0); in bdev_sync()
|
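dev_bdev.c pairs one file-scope wait queue with an atomic in-flight counter: completions decrement and wake, and bdev_sync() sleeps until the counter drains. A minimal equivalent; pending_writes stands in for s_pending_writes:

#include <linux/atomic.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(wq);
static atomic_t pending_writes = ATOMIC_INIT(0);

static void write_end_io(void)      /* called from bio completion */
{
    if (atomic_dec_and_test(&pending_writes))
        wake_up(&wq);
}

static void dev_sync(void)
{
    wait_event(wq, atomic_read(&pending_writes) == 0);
}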
/linux-4.1.27/drivers/mtd/ubi/ |
D | block.c | 92 struct workqueue_struct *wq; member 332 queue_work(dev->wq, &pdu->work); in ubiblock_queue_rq() 428 dev->wq = alloc_workqueue("%s", 0, 0, gd->disk_name); in ubiblock_create() 429 if (!dev->wq) { in ubiblock_create() 461 destroy_workqueue(dev->wq); in ubiblock_cleanup()
|
D | io.c | 336 wait_queue_head_t wq; in do_sync_erase() local 347 init_waitqueue_head(&wq); in do_sync_erase() 354 ei.priv = (unsigned long)&wq; in do_sync_erase() 369 err = wait_event_interruptible(wq, ei.state == MTD_ERASE_DONE || in do_sync_erase()
|
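do_sync_erase() builds a synchronous erase from the asynchronous MTD API by smuggling an on-stack wait-queue head through erase_info.priv and waking it from the erase callback. A hedged sketch of that shape against the 4.1-era erase_info layout:

#include <linux/errno.h>
#include <linux/mtd/mtd.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/wait.h>

static void erase_callback(struct erase_info *ei)
{
    wake_up_interruptible((wait_queue_head_t *)ei->priv);
}

static int sync_erase(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
    struct erase_info ei;
    wait_queue_head_t wq;
    int err;

    init_waitqueue_head(&wq);
    memset(&ei, 0, sizeof(ei));
    ei.mtd = mtd;
    ei.addr = ofs;
    ei.len = len;
    ei.callback = erase_callback;
    ei.priv = (unsigned long)&wq;

    err = mtd_erase(mtd, &ei);
    if (err)
        return err;

    err = wait_event_interruptible(wq, ei.state == MTD_ERASE_DONE ||
                                       ei.state == MTD_ERASE_FAILED);
    if (err)
        return err;     /* signal: erase still in flight, wq on stack! */
    return ei.state == MTD_ERASE_DONE ? 0 : -EIO;
}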
/linux-4.1.27/drivers/gpu/drm/atmel-hlcdc/ |
D | atmel_hlcdc_dc.c | 300 dc->wq = alloc_ordered_workqueue("atmel-hlcdc-dc", 0); in atmel_hlcdc_dc_load() 301 if (!dc->wq) in atmel_hlcdc_dc_load() 352 destroy_workqueue(dc->wq); in atmel_hlcdc_dc_load() 363 flush_workqueue(dc->wq); in atmel_hlcdc_dc_unload() 376 destroy_workqueue(dc->wq); in atmel_hlcdc_dc_unload()
|
D | atmel_hlcdc_dc.h | 139 struct workqueue_struct *wq; member
|
D | atmel_hlcdc_layer.c | 62 drm_flip_work_commit(&layer->gc, layer->wq); in atmel_hlcdc_layer_fb_flip_release_queue() 611 layer->wq = dc->wq; in atmel_hlcdc_layer_init()
|
/linux-4.1.27/net/ |
D | socket.c | 248 struct socket_wq *wq; in sock_alloc_inode() local 253 wq = kmalloc(sizeof(*wq), GFP_KERNEL); in sock_alloc_inode() 254 if (!wq) { in sock_alloc_inode() 258 init_waitqueue_head(&wq->wait); in sock_alloc_inode() 259 wq->fasync_list = NULL; in sock_alloc_inode() 260 RCU_INIT_POINTER(ei->socket.wq, wq); in sock_alloc_inode() 274 struct socket_wq *wq; in sock_destroy_inode() local 277 wq = rcu_dereference_protected(ei->socket.wq, 1); in sock_destroy_inode() 278 kfree_rcu(wq, rcu); in sock_destroy_inode() 576 if (rcu_dereference_protected(sock->wq, 1)->fasync_list) in sock_release() [all …]
|
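sock_alloc_inode() allocates the socket's wait-queue wrapper separately and publishes it with RCU, so wakeup paths can dereference it without taking a reference; teardown frees it with kfree_rcu(). The reader side appears under net/atm below. A sketch with simplified stand-in types for struct socket_wq:

#include <linux/errno.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/wait.h>

struct my_wq {                      /* simplified struct socket_wq */
    wait_queue_head_t wait;
    struct rcu_head rcu;
};

struct my_sock {
    struct my_wq __rcu *wq;
};

static int my_sock_init(struct my_sock *s)
{
    struct my_wq *wq = kmalloc(sizeof(*wq), GFP_KERNEL);

    if (!wq)
        return -ENOMEM;
    init_waitqueue_head(&wq->wait);
    RCU_INIT_POINTER(s->wq, wq);
    return 0;
}

static void my_sock_destroy(struct my_sock *s)
{
    /* "1": destroy time, no concurrent updaters left */
    struct my_wq *wq = rcu_dereference_protected(s->wq, 1);

    kfree_rcu(wq, rcu);             /* readers may still be in RCU sections */
}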
/linux-4.1.27/drivers/gpu/host1x/hw/ |
D | cdma_hw.c | 243 timeout.wq); in cdma_timeout_handler() 296 INIT_DELAYED_WORK(&cdma->timeout.wq, cdma_timeout_handler); in cdma_timeout_init() 308 cancel_delayed_work(&cdma->timeout.wq); in cdma_timeout_destroy()
|
/linux-4.1.27/fs/jffs2/ |
D | os-linux.h | 40 #define sleep_on_spinunlock(wq, s) \ argument 43 add_wait_queue((wq), &__wait); \ 47 remove_wait_queue((wq), &__wait); \
|
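The three hits bracket a complete macro (lines 40, 43, 47); the elided middle is the same queue-then-unlock dance as xlog_wait() above, minus the exclusive queueing. A reconstruction from the visible lines and the standard idiom:

#define sleep_on_spinunlock(wq, s)                              \
    do {                                                        \
        DECLARE_WAITQUEUE(__wait, current);                     \
        add_wait_queue((wq), &__wait);                          \
        set_current_state(TASK_UNINTERRUPTIBLE);                \
        spin_unlock(s);                                         \
        schedule();                                             \
        remove_wait_queue((wq), &__wait);                       \
    } while (0)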
/linux-4.1.27/drivers/scsi/bfa/ |
D | bfad_im.c | 161 wait_queue_head_t *wq; in bfa_cb_tskim_done() local 165 wq = (wait_queue_head_t *) cmnd->SCp.ptr; in bfa_cb_tskim_done() 168 if (wq) in bfa_cb_tskim_done() 169 wake_up(wq); in bfa_cb_tskim_done() 298 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); in bfad_im_reset_lun_handler() 326 cmnd->SCp.ptr = (char *)&wq; in bfad_im_reset_lun_handler() 334 wait_event(wq, test_bit(IO_DONE_BIT, in bfad_im_reset_lun_handler() 361 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); in bfad_im_reset_bus_handler() 368 cmnd->SCp.ptr = (char *)&wq; in bfad_im_reset_bus_handler() 377 wait_event(wq, test_bit(IO_DONE_BIT, in bfad_im_reset_bus_handler()
|
/linux-4.1.27/drivers/scsi/libsas/ |
D | sas_event.c | 59 struct workqueue_struct *wq = ha->core.shost->work_q; in __sas_drain_work() local 67 drain_workqueue(wq); in __sas_drain_work()
|
/linux-4.1.27/net/atm/ |
D | common.c | 95 struct socket_wq *wq; in vcc_def_wakeup() local 98 wq = rcu_dereference(sk->sk_wq); in vcc_def_wakeup() 99 if (wq_has_sleeper(wq)) in vcc_def_wakeup() 100 wake_up(&wq->wait); in vcc_def_wakeup() 114 struct socket_wq *wq; in vcc_write_space() local 119 wq = rcu_dereference(sk->sk_wq); in vcc_write_space() 120 if (wq_has_sleeper(wq)) in vcc_write_space() 121 wake_up_interruptible(&wq->wait); in vcc_write_space()
|
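vcc_def_wakeup()/vcc_write_space() are the canonical reader side of the RCU-published socket_wq: dereference under rcu_read_lock() and test wq_has_sleeper(), whose memory barrier pairs with the sleeper's, before paying for a wakeup. dccp_write_space() below is the identical pattern. A sketch:

#include <net/sock.h>

static void my_write_space(struct sock *sk)
{
    struct socket_wq *wq;

    rcu_read_lock();
    wq = rcu_dereference(sk->sk_wq);
    if (wq_has_sleeper(wq))         /* barrier pairs with the sleeper's */
        wake_up_interruptible(&wq->wait);
    rcu_read_unlock();
}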
/linux-4.1.27/drivers/net/ethernet/qlogic/qlcnic/ |
D | qlcnic_dcb.c | 289 if (dcb->wq) { in __qlcnic_dcb_free() 290 destroy_workqueue(dcb->wq); in __qlcnic_dcb_free() 291 dcb->wq = NULL; in __qlcnic_dcb_free() 314 dcb->wq = create_singlethread_workqueue("qlcnic-dcb"); in __qlcnic_dcb_attach() 315 if (!dcb->wq) { in __qlcnic_dcb_attach() 339 destroy_workqueue(dcb->wq); in __qlcnic_dcb_attach() 340 dcb->wq = NULL; in __qlcnic_dcb_attach() 539 queue_delayed_work(dcb->wq, &dcb->aen_work, 0); in qlcnic_82xx_dcb_aen_handler() 653 queue_delayed_work(dcb->wq, &dcb->aen_work, 0); in qlcnic_83xx_dcb_aen_handler()
|
D | qlcnic_dcb.h | 39 struct workqueue_struct *wq; member
|
/linux-4.1.27/arch/powerpc/platforms/cell/spufs/ |
D | spufs.h | 321 #define spufs_wait(wq, condition) \ argument 326 prepare_to_wait(&(wq), &__wait, TASK_INTERRUPTIBLE); \ 339 finish_wait(&(wq), &__wait); \
|
/linux-4.1.27/include/drm/ |
D | drm_flip_work.h | 87 struct workqueue_struct *wq);
|
/linux-4.1.27/drivers/bluetooth/ |
D | bluecard_cs.c | 283 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); in bluecard_write_wakeup() 306 prepare_to_wait(&wq, &wait, TASK_INTERRUPTIBLE); in bluecard_write_wakeup() 308 finish_wait(&wq, &wait); in bluecard_write_wakeup() 320 prepare_to_wait(&wq, &wait, TASK_INTERRUPTIBLE); in bluecard_write_wakeup() 322 finish_wait(&wq, &wait); in bluecard_write_wakeup()
|
/linux-4.1.27/net/9p/ |
D | trans_fd.c | 144 struct work_struct wq; member 451 m = container_of(work, struct p9_conn, wq); in p9_write_work() 510 schedule_work(&m->wq); in p9_write_work() 592 INIT_WORK(&m->wq, p9_write_work); in p9_conn_create() 644 schedule_work(&m->wq); in p9_poll_mux() 682 schedule_work(&m->wq); in p9_fd_request() 853 cancel_work_sync(&m->wq); in p9_conn_destroy()
|
D | client.c | 271 if (!req->wq) { in p9_tag_alloc() 272 req->wq = kmalloc(sizeof(wait_queue_head_t), GFP_NOFS); in p9_tag_alloc() 273 if (!req->wq) in p9_tag_alloc() 275 init_waitqueue_head(req->wq); in p9_tag_alloc() 297 kfree(req->wq); in p9_tag_alloc() 299 req->wq = NULL; in p9_tag_alloc() 387 kfree(c->reqs[row][col].wq); in p9_tag_cleanup() 430 wake_up(req->wq); in p9_client_cb() 752 err = wait_event_interruptible(*req->wq, in p9_client_rpc()
|
/linux-4.1.27/include/linux/mtd/ |
D | flashchip.h | 90 wait_queue_head_t wq; /* Wait on here when we're waiting for the chip member
|
D | blktrans.h | 47 struct workqueue_struct *wq; member
|
/linux-4.1.27/drivers/misc/cxl/ |
D | context.c | 62 init_waitqueue_head(&ctx->wq); in cxl_context_init() 191 wake_up_all(&ctx->wq); in __detach_context()
|
D | file.c | 256 poll_wait(file, &ctx->wq, poll); in afu_poll() 296 prepare_to_wait(&ctx->wq, &wait, TASK_INTERRUPTIBLE); in afu_read() 317 finish_wait(&ctx->wq, &wait); in afu_read() 357 finish_wait(&ctx->wq, &wait); in afu_read()
|
/linux-4.1.27/arch/cris/mm/ |
D | fault.c | 223 DECLARE_WAIT_QUEUE_HEAD(wq); in do_page_fault() 235 wait_event_interruptible(wq, 0 == 1); in do_page_fault()
|
/linux-4.1.27/virt/kvm/ |
D | async_pf.c | 97 if (waitqueue_active(&vcpu->wq)) in async_pf_execute() 98 wake_up_interruptible(&vcpu->wq); in async_pf_execute()
|
/linux-4.1.27/drivers/char/tpm/ |
D | tpm_ibmvtpm.h | 45 wait_queue_head_t wq; member
|
D | tpm_ibmvtpm.c | 93 sig = wait_event_interruptible(ibmvtpm->wq, ibmvtpm->res_len != 0); in tpm_ibmvtpm_recv() 518 wake_up_interruptible(&ibmvtpm->wq); in ibmvtpm_crq_process() 624 init_waitqueue_head(&ibmvtpm->wq); in tpm_ibmvtpm_probe()
|
/linux-4.1.27/arch/arm/common/ |
D | bL_switcher.c | 263 wait_queue_head_t wq; member 286 wait_event_interruptible(t->wq, in bL_switcher_thread() 372 wake_up(&t->wq); in bL_switch_request_cb() 584 init_waitqueue_head(&t->wq); in bL_switcher_enable()
|
/linux-4.1.27/drivers/uwb/ |
D | uwbd.c | 276 rc->uwbd.wq, in uwbd() 341 wake_up_all(&rc->uwbd.wq); in uwbd_event_queue()
|
/linux-4.1.27/drivers/mtd/ |
D | mtd_blkdevs.c | 188 queue_work(dev->wq, &dev->work); in mtd_blktrans_request() 432 new->wq = alloc_workqueue("%s%d", 0, 0, in add_mtd_blktrans_dev() 434 if (!new->wq) in add_mtd_blktrans_dev() 478 destroy_workqueue(old->wq); in del_mtd_blktrans_dev()
|
/linux-4.1.27/drivers/net/caif/ |
D | caif_hsi.c | 78 queue_work(cfhsi->wq, &cfhsi->wake_down_work); in cfhsi_inactivity_tout() 988 queue_work(cfhsi->wq, &cfhsi->wake_up_work); in cfhsi_wake_up_cb() 1107 queue_work(cfhsi->wq, &cfhsi->wake_up_work); in cfhsi_xmit() 1204 cfhsi->wq = create_singlethread_workqueue(cfhsi->ndev->name); in cfhsi_open() 1205 if (!cfhsi->wq) { in cfhsi_open() 1251 destroy_workqueue(cfhsi->wq); in cfhsi_open() 1271 flush_workqueue(cfhsi->wq); in cfhsi_close() 1282 destroy_workqueue(cfhsi->wq); in cfhsi_close()
|
D | caif_spi.c | 641 cfspi->wq = create_singlethread_workqueue(dev->name); in cfspi_init() 642 if (!cfspi->wq) { in cfspi_init() 665 queue_work(cfspi->wq, &cfspi->work); in cfspi_init() 692 destroy_workqueue(cfspi->wq); in cfspi_uninit()
|
/linux-4.1.27/Documentation/spi/ |
D | 00-INDEX | 10 - PXA2xx SPI master controller build by spi_message fifo wq
|
/linux-4.1.27/drivers/mtd/nand/ |
D | tmio_nand.c | 175 if (unlikely(!waitqueue_active(&nand_chip->controller->wq))) in tmio_irq() 178 wake_up(&nand_chip->controller->wq); in tmio_irq() 198 timeout = wait_event_timeout(nand_chip->controller->wq, in tmio_nand_wait()
|
/linux-4.1.27/drivers/staging/nvec/ |
D | nvec.h | 149 struct workqueue_struct *wq; member
|
/linux-4.1.27/drivers/gpu/drm/via/ |
D | via_dmablit.h | 77 struct work_struct wq; member
|
D | via_dmablit.c | 356 schedule_work(&blitq->wq); in via_dmablit_handler() 498 drm_via_blitq_t *blitq = container_of(work, drm_via_blitq_t, wq); in via_dmablit_workqueue() 566 INIT_WORK(&blitq->wq, via_dmablit_workqueue); in via_init_dmablit()
|
/linux-4.1.27/net/dccp/ |
D | output.c | 200 struct socket_wq *wq; in dccp_write_space() local 203 wq = rcu_dereference(sk->sk_wq); in dccp_write_space() 204 if (wq_has_sleeper(wq)) in dccp_write_space() 205 wake_up_interruptible(&wq->wait); in dccp_write_space()
|
/linux-4.1.27/fs/ocfs2/dlm/ |
D | dlmmaster.c | 280 init_waitqueue_head(&mle->wq); in dlm_init_mle() 571 init_waitqueue_head(&res->wq); in dlm_init_lockres() 685 wake_up(&res->wq); in dlm_lockres_drop_inflight_ref() 1010 wake_up(&res->wq); in dlm_get_lock_resource() 1133 (void)wait_event_timeout(mle->wq, in dlm_wait_for_lock_mastery() 1782 wake_up(&res->wq); in dlm_do_assert_master() 1961 wake_up(&mle->wq); in dlm_assert_master_handler() 1982 wake_up(&res->wq); in dlm_assert_master_handler() 2080 wake_up(&res->wq); in dlm_assert_master_post_handler() 2616 ret = wait_event_interruptible_timeout(mle->wq, in dlm_migrate_lockres() [all …]
|
D | dlmconvert.c | 94 wake_up(&res->wq); in dlmconvert_master() 352 wake_up(&res->wq); in dlmconvert_remote() 539 wake_up(&res->wq); in dlm_convert_lock_handler()
|
/linux-4.1.27/drivers/block/ |
D | loop.h | 57 struct workqueue_struct *wq; member
|
/linux-4.1.27/drivers/net/can/spi/ |
D | mcp251x.c | 258 struct workqueue_struct *wq; member 530 queue_work(priv->wq, &priv->tx_work); in mcp251x_hard_start_xmit() 547 queue_work(priv->wq, &priv->restart_work); in mcp251x_do_set_mode() 701 destroy_workqueue(priv->wq); in mcp251x_stop() 702 priv->wq = NULL; in mcp251x_stop() 963 priv->wq = create_freezable_workqueue("mcp251x_wq"); in mcp251x_open() 1229 queue_work(priv->wq, &priv->restart_work); in mcp251x_can_resume()
|
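mcp251x bounces TX onto a freezable workqueue because SPI transfers sleep and so cannot run in the softirq xmit path; freezable means the queue is quiesced across suspend/resume, so no SPI traffic races the controller's power state. A sketch with illustrative names; the work handler is assumed to exist elsewhere:

#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>

void spi_can_tx_handler(struct work_struct *work);  /* assumed elsewhere */

struct spi_can_priv {
    struct workqueue_struct *wq;
    struct work_struct tx_work;
    struct sk_buff *tx_skb;
};

static netdev_tx_t spi_can_xmit(struct sk_buff *skb, struct net_device *net)
{
    struct spi_can_priv *priv = netdev_priv(net);

    netif_stop_queue(net);          /* one frame in flight at a time */
    priv->tx_skb = skb;
    queue_work(priv->wq, &priv->tx_work);   /* worker drives the SPI bus */
    return NETDEV_TX_OK;
}

static int spi_can_open(struct spi_can_priv *priv)
{
    priv->wq = create_freezable_workqueue("spi_can_wq");
    if (!priv->wq)
        return -ENOMEM;
    INIT_WORK(&priv->tx_work, spi_can_tx_handler);
    return 0;
}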
/linux-4.1.27/drivers/gpu/drm/msm/ |
D | msm_gpu.c | 242 queue_work(priv->wq, &gpu->inactive_work); in inactive_handler() 311 queue_work(priv->wq, &gpu->recover_work); in hangcheck_handler() 319 queue_work(priv->wq, &gpu->retire_work); in hangcheck_handler() 458 queue_work(priv->wq, &gpu->retire_work); in msm_gpu_retire()
|
D | msm_drv.c | 137 flush_workqueue(priv->wq); in msm_unload() 138 destroy_workqueue(priv->wq); in msm_unload() 279 priv->wq = alloc_ordered_workqueue("msm", 0); in msm_load() 697 queue_work(priv->wq, &cb->work); in msm_queue_fence_cb() 722 queue_work(priv->wq, &cb->work); in msm_update_fence()
|
/linux-4.1.27/drivers/gpu/drm/omapdrm/ |
D | omap_crtc.c | 292 queue_work(priv->wq, &omap_crtc->apply_work); in omap_crtc_apply_irq() 349 queue_work(priv->wq, &omap_crtc->apply_work); in apply_worker() 379 queue_work(priv->wq, &omap_crtc->apply_work); in omap_crtc_apply() 602 queue_work(priv->wq, &omap_crtc->page_flip_work); in page_flip_cb()
|
/linux-4.1.27/drivers/ps3/ |
D | ps3av.c | 47 struct workqueue_struct *wq; member 488 queue_work(ps3av->wq, &ps3av->work); in ps3av_set_videomode() 959 ps3av->wq = create_singlethread_workqueue("ps3avd"); in ps3av_probe() 960 if (!ps3av->wq) { in ps3av_probe() 1021 if (ps3av->wq) in ps3av_remove() 1022 destroy_workqueue(ps3av->wq); in ps3av_remove()
|
/linux-4.1.27/include/linux/power/ |
D | charger-manager.h | 70 struct work_struct wq; member
|
/linux-4.1.27/net/nfc/hci/ |
D | hci.h | 39 wait_queue_head_t *wq; member
|
D | command.c | 64 wake_up(hcp_ew->wq); in nfc_hci_execute_cb() 73 hcp_ew.wq = &ew_wq; in nfc_hci_execute_cmd()
|
/linux-4.1.27/fs/ext4/ |
D | page-io.c | 216 struct workqueue_struct *wq; in ext4_add_complete_io() local 223 wq = sbi->rsv_conversion_wq; in ext4_add_complete_io() 225 queue_work(wq, &ei->i_rsv_conversion_work); in ext4_add_complete_io()
|
/linux-4.1.27/drivers/media/platform/vsp1/ |
D | vsp1_video.h | 69 wait_queue_head_t wq; member
|
/linux-4.1.27/drivers/staging/lustre/lustre/ptlrpc/ |
D | niobuf.c | 246 wait_queue_head_t *wq; in ptlrpc_unregister_bulk() local 279 wq = &req->rq_set->set_waitq; in ptlrpc_unregister_bulk() 281 wq = &req->rq_reply_waitq; in ptlrpc_unregister_bulk() 288 rc = l_wait_event(*wq, !ptlrpc_client_bulk_active(req), &lwi); in ptlrpc_unregister_bulk()
|
/linux-4.1.27/drivers/char/ |
D | tlclk.c | 200 static DECLARE_WAIT_QUEUE_HEAD(wq); 253 wait_event_interruptible(wq, got_event); in tlclk_read() 873 wake_up(&wq); in switchover_timeout() 929 wake_up(&wq); in tlclk_interrupt()
|
/linux-4.1.27/drivers/gpu/drm/i2c/ |
D | adv7511.c | 38 wait_queue_head_t wq; member 448 wake_up_all(&adv7511->wq); in adv7511_irq_process() 472 ret = wait_event_interruptible_timeout(adv7511->wq, in adv7511_wait_for_edid() 917 init_waitqueue_head(&adv7511->wq); in adv7511_probe()
|
/linux-4.1.27/drivers/mfd/ |
D | dln2.c | 87 wait_queue_head_t wq; member 388 ret = wait_event_interruptible(dln2->mod_rx_slots[handle].wq, in alloc_rx_slot() 426 wake_up_interruptible(&rxs->wq); in free_rx_slot() 753 init_waitqueue_head(&dln2->mod_rx_slots[i].wq); in dln2_probe()
|
/linux-4.1.27/drivers/net/wireless/ath/ar5523/ |
D | ar5523.h | 93 struct workqueue_struct *wq; member
|
/linux-4.1.27/drivers/md/ |
D | dm-era-target.c | 1152 struct workqueue_struct *wq; member 1207 queue_work(era->wq, &era->worker); in wake_worker() 1374 flush_workqueue(era->wq); in stop_worker() 1397 if (era->wq) in era_destroy() 1398 destroy_workqueue(era->wq); in era_destroy() 1500 era->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM); in era_ctr() 1501 if (!era->wq) { in era_ctr()
|
/linux-4.1.27/drivers/net/ethernet/mellanox/mlx5/core/ |
D | cmd.c | 627 } else if (!queue_work(cmd->wq, &ent->work)) { in mlx5_cmd_invoke() 1054 flush_workqueue(cmd->wq); in mlx5_cmd_use_events() 1073 flush_workqueue(cmd->wq); in mlx5_cmd_use_polling() 1453 cmd->wq = create_singlethread_workqueue(cmd->wq_name); in mlx5_cmd_init() 1454 if (!cmd->wq) { in mlx5_cmd_init() 1469 destroy_workqueue(cmd->wq); in mlx5_cmd_init() 1489 destroy_workqueue(cmd->wq); in mlx5_cmd_cleanup()
|
/linux-4.1.27/fs/cachefiles/ |
D | namei.c | 201 wait_queue_head_t *wq; in cachefiles_mark_object_active() local 218 wq = bit_waitqueue(&xobject->flags, CACHEFILES_OBJECT_ACTIVE); in cachefiles_mark_object_active() 222 prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE); in cachefiles_mark_object_active() 228 finish_wait(wq, &wait); in cachefiles_mark_object_active()
|
/linux-4.1.27/drivers/gpu/drm/i915/ |
D | i915_dma.c | 892 dev_priv->wq = alloc_ordered_workqueue("i915", 0); in i915_driver_load() 893 if (dev_priv->wq == NULL) { in i915_driver_load() 997 destroy_workqueue(dev_priv->wq); in i915_driver_load() 1071 flush_workqueue(dev_priv->wq); in i915_driver_unload() 1084 destroy_workqueue(dev_priv->wq); in i915_driver_unload()
|
/linux-4.1.27/include/net/caif/ |
D | caif_hsi.h | 165 struct workqueue_struct *wq; member
|
D | caif_spi.h | 115 struct workqueue_struct *wq; member
|
/linux-4.1.27/drivers/gpu/drm/ttm/ |
D | ttm_bo.c | 466 schedule_delayed_work(&bdev->wq, in ttm_bo_cleanup_refs_or_queue() 613 container_of(work, struct ttm_bo_device, wq.work); in ttm_bo_delayed_workqueue() 616 schedule_delayed_work(&bdev->wq, in ttm_bo_delayed_workqueue() 647 return cancel_delayed_work_sync(&bdev->wq); in ttm_bo_lock_delayed_workqueue() 654 schedule_delayed_work(&bdev->wq, in ttm_bo_unlock_delayed_workqueue() 1434 cancel_delayed_work_sync(&bdev->wq); in ttm_bo_device_release() 1476 INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue); in ttm_bo_device_init()
|
/linux-4.1.27/drivers/net/bonding/ |
D | bond_main.c | 549 queue_delayed_work(bond->wq, &bond->mcast_work, 1); in bond_resend_igmp_join_requests_delayed() 556 queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5); in bond_resend_igmp_join_requests_delayed() 885 queue_delayed_work(bond->wq, &bond->mcast_work, 1); in bond_change_active_slave() 1287 queue_delayed_work(slave->bond->wq, &nnw->work, 0); in bond_queue_slave_event() 2196 queue_delayed_work(bond->wq, &bond->mii_work, delay); in bond_mii_monitor() 2613 queue_delayed_work(bond->wq, &bond->arp_work, in bond_loadbalance_arp_mon() 2886 queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks); in bond_activebackup_arp_mon() 3212 queue_delayed_work(bond->wq, &bond->alb_work, 0); in bond_open() 3216 queue_delayed_work(bond->wq, &bond->mii_work, 0); in bond_open() 3219 queue_delayed_work(bond->wq, &bond->arp_work, 0); in bond_open() [all …]
|
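bond_main.c runs its link monitors as self-rearming delayed work: each handler re-queues itself on the bond's private workqueue, giving a periodic poll without a dedicated timer. A sketch of that shape with illustrative names:

#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct monitor {
    struct workqueue_struct *wq;
    struct delayed_work mii_work;
    unsigned int interval_ms;
};

static void mii_monitor(struct work_struct *work)
{
    struct monitor *m = container_of(to_delayed_work(work),
                                     struct monitor, mii_work);

    /* inspect link state, fail over if needed, then re-arm */
    queue_delayed_work(m->wq, &m->mii_work,
                       msecs_to_jiffies(m->interval_ms));
}

static void monitor_start(struct monitor *m)
{
    INIT_DELAYED_WORK(&m->mii_work, mii_monitor);
    queue_delayed_work(m->wq, &m->mii_work, 0);     /* first pass now */
}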
/linux-4.1.27/drivers/media/pci/dm1105/ |
D | dm1105.c | 360 struct workqueue_struct *wq; member 730 queue_work(dev->wq, &dev->work); in dm1105_irq() 1140 dev->wq = create_singlethread_workqueue(dev->wqn); in dm1105_probe() 1141 if (!dev->wq) { in dm1105_probe() 1154 destroy_workqueue(dev->wq); in dm1105_probe()
|
/linux-4.1.27/drivers/net/ |
D | macvtap.c | 38 struct socket_wq wq; member 483 RCU_INIT_POINTER(q->sock.wq, &q->wq); in macvtap_open() 484 init_waitqueue_head(&q->wq.wait); in macvtap_open() 533 poll_wait(file, &q->wq.wait, wait); in macvtap_poll()
|
/linux-4.1.27/drivers/infiniband/core/ |
D | mad_priv.h | 208 struct workqueue_struct *wq; member
|
/linux-4.1.27/fs/fscache/ |
D | page.c | 40 wait_queue_head_t *wq = bit_waitqueue(&cookie->flags, 0); in __fscache_wait_on_page_write() local 42 wait_event(*wq, !__fscache_check_page_write(cookie, page)); in __fscache_wait_on_page_write() 53 wait_queue_head_t *wq = bit_waitqueue(&cookie->flags, 0); in release_page_wait_timeout() local 55 return wait_event_timeout(*wq, !__fscache_check_page_write(cookie, page), in release_page_wait_timeout()
|