
Searched refs:q in linux-4.4.14 (Results 1 – 200 of 995) sorted by relevance


/linux-4.4.14/crypto/
gf128mul.c:56 #define gf128mul_dat(q) { \ argument
57 q(0x00), q(0x01), q(0x02), q(0x03), q(0x04), q(0x05), q(0x06), q(0x07),\
58 q(0x08), q(0x09), q(0x0a), q(0x0b), q(0x0c), q(0x0d), q(0x0e), q(0x0f),\
59 q(0x10), q(0x11), q(0x12), q(0x13), q(0x14), q(0x15), q(0x16), q(0x17),\
60 q(0x18), q(0x19), q(0x1a), q(0x1b), q(0x1c), q(0x1d), q(0x1e), q(0x1f),\
61 q(0x20), q(0x21), q(0x22), q(0x23), q(0x24), q(0x25), q(0x26), q(0x27),\
62 q(0x28), q(0x29), q(0x2a), q(0x2b), q(0x2c), q(0x2d), q(0x2e), q(0x2f),\
63 q(0x30), q(0x31), q(0x32), q(0x33), q(0x34), q(0x35), q(0x36), q(0x37),\
64 q(0x38), q(0x39), q(0x3a), q(0x3b), q(0x3c), q(0x3d), q(0x3e), q(0x3f),\
65 q(0x40), q(0x41), q(0x42), q(0x43), q(0x44), q(0x45), q(0x46), q(0x47),\
[all …]
algapi.c:186 struct crypto_alg *q; in __crypto_register_alg() local
200 list_for_each_entry(q, &crypto_alg_list, cra_list) { in __crypto_register_alg()
201 if (q == alg) in __crypto_register_alg()
204 if (crypto_is_moribund(q)) in __crypto_register_alg()
207 if (crypto_is_larval(q)) { in __crypto_register_alg()
208 if (!strcmp(alg->cra_driver_name, q->cra_driver_name)) in __crypto_register_alg()
213 if (!strcmp(q->cra_driver_name, alg->cra_name) || in __crypto_register_alg()
214 !strcmp(q->cra_name, alg->cra_driver_name)) in __crypto_register_alg()
250 struct crypto_alg *q; in crypto_alg_tested() local
254 list_for_each_entry(q, &crypto_alg_list, cra_list) { in crypto_alg_tested()
[all …]
/linux-4.4.14/drivers/media/v4l2-core/
videobuf-core.c:52 #define CALL(q, f, arg...) \ argument
53 ((q->int_ops->f) ? q->int_ops->f(arg) : 0)
54 #define CALLPTR(q, f, arg...) \ argument
55 ((q->int_ops->f) ? q->int_ops->f(arg) : NULL)
57 struct videobuf_buffer *videobuf_alloc_vb(struct videobuf_queue *q) in videobuf_alloc_vb() argument
61 BUG_ON(q->msize < sizeof(*vb)); in videobuf_alloc_vb()
63 if (!q->int_ops || !q->int_ops->alloc_vb) { in videobuf_alloc_vb()
68 vb = q->int_ops->alloc_vb(q->msize); in videobuf_alloc_vb()
78 static int is_state_active_or_queued(struct videobuf_queue *q, struct videobuf_buffer *vb) in is_state_active_or_queued() argument
83 spin_lock_irqsave(q->irqlock, flags); in is_state_active_or_queued()
[all …]
videobuf2-core.c:37 static void __vb2_queue_cancel(struct vb2_queue *q);
45 struct vb2_queue *q = vb->vb2_queue; in __vb2_buf_mem_alloc() local
47 q->is_output ? DMA_TO_DEVICE : DMA_FROM_DEVICE; in __vb2_buf_mem_alloc()
56 unsigned long size = PAGE_ALIGN(q->plane_sizes[plane]); in __vb2_buf_mem_alloc()
58 mem_priv = call_ptr_memop(vb, alloc, q->alloc_ctx[plane], in __vb2_buf_mem_alloc()
59 size, dma_dir, q->gfp_flags); in __vb2_buf_mem_alloc()
65 vb->planes[plane].length = q->plane_sizes[plane]; in __vb2_buf_mem_alloc()
143 static void __setup_lengths(struct vb2_queue *q, unsigned int n) in __setup_lengths() argument
148 for (buffer = q->num_buffers; buffer < q->num_buffers + n; ++buffer) { in __setup_lengths()
149 vb = q->bufs[buffer]; in __setup_lengths()
[all …]
videobuf2-v4l2.c:114 struct vb2_queue *q = vb->vb2_queue; in __set_timestamp() local
116 if (q->is_output) { in __set_timestamp()
121 if ((q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) == in __set_timestamp()
148 static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b, in vb2_queue_or_prepare_buf() argument
151 if (b->type != q->type) { in vb2_queue_or_prepare_buf()
156 if (b->index >= q->num_buffers) { in vb2_queue_or_prepare_buf()
161 if (q->bufs[b->index] == NULL) { in vb2_queue_or_prepare_buf()
167 if (b->memory != q->memory) { in vb2_queue_or_prepare_buf()
172 return __verify_planes_array(q->bufs[b->index], b); in vb2_queue_or_prepare_buf()
183 struct vb2_queue *q = vb->vb2_queue; in __fill_v4l2_buffer() local
[all …]
videobuf2-internal.h:66 #define log_qop(q, op) \ argument
67 dprintk(2, "call_qop(%p, %s)%s\n", q, #op, \
68 (q)->ops->op ? "" : " (nop)")
70 #define call_qop(q, op, args...) \ argument
74 log_qop(q, op); \
75 err = (q)->ops->op ? (q)->ops->op(args) : 0; \
77 (q)->cnt_ ## op++; \
81 #define call_void_qop(q, op, args...) \ argument
83 log_qop(q, op); \
84 if ((q)->ops->op) \
[all …]
videobuf-dma-contig.c:70 dev_dbg(map->q->dev, "vm_open %p [count=%u,vma=%08lx-%08lx]\n", in videobuf_vm_open()
79 struct videobuf_queue *q = map->q; in videobuf_vm_close() local
82 dev_dbg(q->dev, "vm_close %p [count=%u,vma=%08lx-%08lx]\n", in videobuf_vm_close()
89 dev_dbg(q->dev, "munmap %p q=%p\n", map, q); in videobuf_vm_close()
90 videobuf_queue_lock(q); in videobuf_vm_close()
93 if (q->streaming) in videobuf_vm_close()
94 videobuf_queue_cancel(q); in videobuf_vm_close()
97 if (NULL == q->bufs[i]) in videobuf_vm_close()
100 if (q->bufs[i]->map != map) in videobuf_vm_close()
103 mem = q->bufs[i]->priv; in videobuf_vm_close()
[all …]
videobuf-vmalloc.c:67 struct videobuf_queue *q = map->q; in videobuf_vm_close() local
77 dprintk(1, "munmap %p q=%p\n", map, q); in videobuf_vm_close()
78 videobuf_queue_lock(q); in videobuf_vm_close()
81 if (q->streaming) in videobuf_vm_close()
82 videobuf_queue_cancel(q); in videobuf_vm_close()
85 if (NULL == q->bufs[i]) in videobuf_vm_close()
88 if (q->bufs[i]->map != map) in videobuf_vm_close()
91 mem = q->bufs[i]->priv; in videobuf_vm_close()
111 q->bufs[i]->map = NULL; in videobuf_vm_close()
112 q->bufs[i]->baddr = 0; in videobuf_vm_close()
[all …]
videobuf-dma-sg.c:397 struct videobuf_queue *q = map->q; in videobuf_vm_close() local
406 dprintk(1, "munmap %p q=%p\n", map, q); in videobuf_vm_close()
407 videobuf_queue_lock(q); in videobuf_vm_close()
409 if (NULL == q->bufs[i]) in videobuf_vm_close()
411 mem = q->bufs[i]->priv; in videobuf_vm_close()
417 if (q->bufs[i]->map != map) in videobuf_vm_close()
419 q->bufs[i]->map = NULL; in videobuf_vm_close()
420 q->bufs[i]->baddr = 0; in videobuf_vm_close()
421 q->ops->buf_release(q, q->bufs[i]); in videobuf_vm_close()
423 videobuf_queue_unlock(q); in videobuf_vm_close()
[all …]
/linux-4.4.14/sound/core/seq/
seq_queue.c:63 static int queue_list_add(struct snd_seq_queue *q) in queue_list_add() argument
71 queue_list[i] = q; in queue_list_add()
72 q->queue = i; in queue_list_add()
84 struct snd_seq_queue *q; in queue_list_remove() local
88 q = queue_list[id]; in queue_list_remove()
89 if (q) { in queue_list_remove()
90 spin_lock(&q->owner_lock); in queue_list_remove()
91 if (q->owner == client) { in queue_list_remove()
93 q->klocked = 1; in queue_list_remove()
94 spin_unlock(&q->owner_lock); in queue_list_remove()
[all …]
seq_queue.h:95 #define queuefree(q) snd_use_lock_free(&(q)->use_lock) argument
101 void snd_seq_check_queue(struct snd_seq_queue *q, int atomic, int hop);
120 #define udiv_qrnnd(q, r, n1, n0, d) \ argument
122 : "=a" ((u32)(q)), \
128 #define u64_div(x,y,q) do {u32 __tmp; udiv_qrnnd(q, __tmp, (x)>>32, x, y);} while (0) argument
129 #define u64_mod(x,y,r) do {u32 __tmp; udiv_qrnnd(__tmp, q, (x)>>32, x, y);} while (0)
130 #define u64_divmod(x,y,q,r) udiv_qrnnd(q, r, (x)>>32, x, y) argument
133 #define u64_div(x,y,q) ((q) = (u32)((u64)(x) / (u64)(y))) argument
135 #define u64_divmod(x,y,q,r) (u64_div(x,y,q), u64_mod(x,y,r)) argument
seq_timer.c:140 struct snd_seq_queue *q = timeri->callback_data; in snd_seq_timer_interrupt() local
143 if (q == NULL) in snd_seq_timer_interrupt()
145 tmr = q->timer; in snd_seq_timer_interrupt()
173 snd_seq_check_queue(q, 1, 0); in snd_seq_timer_interrupt()
270 int snd_seq_timer_open(struct snd_seq_queue *q) in snd_seq_timer_open() argument
277 tmr = q->timer; in snd_seq_timer_open()
282 sprintf(str, "sequencer queue %i", q->queue); in snd_seq_timer_open()
287 err = snd_timer_open(&t, str, &tmr->alsa_id, q->queue); in snd_seq_timer_open()
297 err = snd_timer_open(&t, str, &tid, q->queue); in snd_seq_timer_open()
305 t->callback_data = q; in snd_seq_timer_open()
[all …]
/linux-4.4.14/drivers/isdn/hardware/eicon/
dqueue.c:17 diva_data_q_init(diva_um_idi_data_queue_t *q, in diva_data_q_init() argument
22 q->max_length = max_length; in diva_data_q_init()
23 q->segments = max_segments; in diva_data_q_init()
25 for (i = 0; i < q->segments; i++) { in diva_data_q_init()
26 q->data[i] = NULL; in diva_data_q_init()
27 q->length[i] = 0; in diva_data_q_init()
29 q->read = q->write = q->count = q->segment_pending = 0; in diva_data_q_init()
31 for (i = 0; i < q->segments; i++) { in diva_data_q_init()
32 if (!(q->data[i] = diva_os_malloc(0, q->max_length))) { in diva_data_q_init()
33 diva_data_q_finit(q); in diva_data_q_init()
[all …]
dqueue.h:19 int diva_data_q_init(diva_um_idi_data_queue_t *q,
21 int diva_data_q_finit(diva_um_idi_data_queue_t *q);
22 int diva_data_q_get_max_length(const diva_um_idi_data_queue_t *q);
23 void *diva_data_q_get_segment4write(diva_um_idi_data_queue_t *q);
24 void diva_data_q_ack_segment4write(diva_um_idi_data_queue_t *q,
27 q);
28 int diva_data_q_get_segment_length(const diva_um_idi_data_queue_t *q);
29 void diva_data_q_ack_segment4read(diva_um_idi_data_queue_t *q);
/linux-4.4.14/sound/core/seq/oss/
seq_oss_readq.c:48 struct seq_oss_readq *q; in snd_seq_oss_readq_new() local
50 q = kzalloc(sizeof(*q), GFP_KERNEL); in snd_seq_oss_readq_new()
51 if (!q) in snd_seq_oss_readq_new()
54 q->q = kcalloc(maxlen, sizeof(union evrec), GFP_KERNEL); in snd_seq_oss_readq_new()
55 if (!q->q) { in snd_seq_oss_readq_new()
56 kfree(q); in snd_seq_oss_readq_new()
60 q->maxlen = maxlen; in snd_seq_oss_readq_new()
61 q->qlen = 0; in snd_seq_oss_readq_new()
62 q->head = q->tail = 0; in snd_seq_oss_readq_new()
63 init_waitqueue_head(&q->midi_sleep); in snd_seq_oss_readq_new()
[all …]
seq_oss_writeq.c:39 struct seq_oss_writeq *q; in snd_seq_oss_writeq_new() local
42 if ((q = kzalloc(sizeof(*q), GFP_KERNEL)) == NULL) in snd_seq_oss_writeq_new()
44 q->dp = dp; in snd_seq_oss_writeq_new()
45 q->maxlen = maxlen; in snd_seq_oss_writeq_new()
46 spin_lock_init(&q->sync_lock); in snd_seq_oss_writeq_new()
47 q->sync_event_put = 0; in snd_seq_oss_writeq_new()
48 q->sync_time = 0; in snd_seq_oss_writeq_new()
49 init_waitqueue_head(&q->sync_sleep); in snd_seq_oss_writeq_new()
58 return q; in snd_seq_oss_writeq_new()
65 snd_seq_oss_writeq_delete(struct seq_oss_writeq *q) in snd_seq_oss_writeq_delete() argument
[all …]
seq_oss_event.c:34 static int extended_event(struct seq_oss_devinfo *dp, union evrec *q, struct snd_seq_event *ev);
39 static int old_event(struct seq_oss_devinfo *dp, union evrec *q, struct snd_seq_event *ev);
54 snd_seq_oss_process_event(struct seq_oss_devinfo *dp, union evrec *q, struct snd_seq_event *ev) in snd_seq_oss_process_event() argument
56 switch (q->s.code) { in snd_seq_oss_process_event()
58 return extended_event(dp, q, ev); in snd_seq_oss_process_event()
61 return chn_voice_event(dp, q, ev); in snd_seq_oss_process_event()
64 return chn_common_event(dp, q, ev); in snd_seq_oss_process_event()
67 return timing_event(dp, q, ev); in snd_seq_oss_process_event()
70 return local_event(dp, q, ev); in snd_seq_oss_process_event()
73 return snd_seq_oss_synth_sysex(dp, q->x.dev, q->x.buf, ev); in snd_seq_oss_process_event()
[all …]
seq_oss_readq.h:32 union evrec *q; member
43 void snd_seq_oss_readq_delete(struct seq_oss_readq *q);
49 int snd_seq_oss_readq_pick(struct seq_oss_readq *q, union evrec *rec);
50 void snd_seq_oss_readq_wait(struct seq_oss_readq *q);
51 void snd_seq_oss_readq_free(struct seq_oss_readq *q);
53 #define snd_seq_oss_readq_lock(q, flags) spin_lock_irqsave(&(q)->lock, flags) argument
54 #define snd_seq_oss_readq_unlock(q, flags) spin_unlock_irqrestore(&(q)->lock, flags) argument
seq_oss_writeq.h:42 void snd_seq_oss_writeq_delete(struct seq_oss_writeq *q);
43 void snd_seq_oss_writeq_clear(struct seq_oss_writeq *q);
44 int snd_seq_oss_writeq_sync(struct seq_oss_writeq *q);
45 void snd_seq_oss_writeq_wakeup(struct seq_oss_writeq *q, abstime_t time);
46 int snd_seq_oss_writeq_get_free_size(struct seq_oss_writeq *q);
47 void snd_seq_oss_writeq_set_output(struct seq_oss_writeq *q, int size);
/linux-4.4.14/drivers/staging/rdma/amso1100/
c2_mq.c:36 void *c2_mq_alloc(struct c2_mq *q) in c2_mq_alloc() argument
38 BUG_ON(q->magic != C2_MQ_MAGIC); in c2_mq_alloc()
39 BUG_ON(q->type != C2_MQ_ADAPTER_TARGET); in c2_mq_alloc()
41 if (c2_mq_full(q)) { in c2_mq_alloc()
46 (struct c2wr_hdr *) (q->msg_pool.host + q->priv * q->msg_size); in c2_mq_alloc()
53 return q->msg_pool.host + q->priv * q->msg_size; in c2_mq_alloc()
58 void c2_mq_produce(struct c2_mq *q) in c2_mq_produce() argument
60 BUG_ON(q->magic != C2_MQ_MAGIC); in c2_mq_produce()
61 BUG_ON(q->type != C2_MQ_ADAPTER_TARGET); in c2_mq_produce()
63 if (!c2_mq_full(q)) { in c2_mq_produce()
[all …]
c2_mq.h:86 static __inline__ int c2_mq_empty(struct c2_mq *q) in c2_mq_empty() argument
88 return q->priv == be16_to_cpu(*q->shared); in c2_mq_empty()
91 static __inline__ int c2_mq_full(struct c2_mq *q) in c2_mq_full() argument
93 return q->priv == (be16_to_cpu(*q->shared) + q->q_size - 1) % q->q_size; in c2_mq_full()
96 extern void c2_mq_lconsume(struct c2_mq *q, u32 wqe_count);
97 extern void *c2_mq_alloc(struct c2_mq *q);
98 extern void c2_mq_produce(struct c2_mq *q);
99 extern void *c2_mq_consume(struct c2_mq *q);
100 extern void c2_mq_free(struct c2_mq *q);
101 extern void c2_mq_req_init(struct c2_mq *q, u32 index, u32 q_size, u32 msg_size,
[all …]
/linux-4.4.14/drivers/s390/cio/
qdio_main.c:99 static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq) in qdio_check_ccq() argument
111 DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq); in qdio_check_ccq()
126 static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state, in qdio_do_eqbs() argument
129 int rc, tmp_count = count, tmp_start = start, nr = q->nr, retried = 0; in qdio_do_eqbs()
132 qperf_inc(q, eqbs); in qdio_do_eqbs()
134 if (!q->is_input_q) in qdio_do_eqbs()
135 nr += q->irq_ptr->nr_input_qs; in qdio_do_eqbs()
137 ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count, in qdio_do_eqbs()
139 rc = qdio_check_ccq(q, ccq); in qdio_do_eqbs()
144 DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq); in qdio_do_eqbs()
[all …]
qdio_debug.c:115 struct qdio_q *q = m->private; in qstat_show() local
118 if (!q) in qstat_show()
122 q->timestamp, last_ai_time); in qstat_show()
124 atomic_read(&q->nr_buf_used), in qstat_show()
125 q->first_to_check, q->last_move); in qstat_show()
126 if (q->is_input_q) { in qstat_show()
128 q->u.in.polling, q->u.in.ack_start, in qstat_show()
129 q->u.in.ack_count); in qstat_show()
131 *(u32 *)q->irq_ptr->dsci, in qstat_show()
133 &q->u.in.queue_irq_state)); in qstat_show()
[all …]
qdio_setup.c:107 struct qdio_q *q; in set_impl_params() local
121 for_each_input_queue(irq_ptr, q, i) { in set_impl_params()
123 q->slib->slibe[j].parms = in set_impl_params()
130 for_each_output_queue(irq_ptr, q, i) { in set_impl_params()
132 q->slib->slibe[j].parms = in set_impl_params()
139 struct qdio_q *q; in __qdio_allocate_qs() local
143 q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL); in __qdio_allocate_qs()
144 if (!q) in __qdio_allocate_qs()
147 q->slib = (struct slib *) __get_free_page(GFP_KERNEL); in __qdio_allocate_qs()
148 if (!q->slib) { in __qdio_allocate_qs()
[all …]
qdio.h:314 #define queue_type(q) q->irq_ptr->qib.qfmt argument
315 #define SCH_NO(q) (q->irq_ptr->schid.sch_no) argument
330 static inline void account_sbals_error(struct qdio_q *q, int count) in account_sbals_error() argument
332 q->q_stats.nr_sbal_error += count; in account_sbals_error()
333 q->q_stats.nr_sbal_total += count; in account_sbals_error()
337 static inline int multicast_outbound(struct qdio_q *q) in multicast_outbound() argument
339 return (q->irq_ptr->nr_output_qs > 1) && in multicast_outbound()
340 (q->nr == q->irq_ptr->nr_output_qs - 1); in multicast_outbound()
343 #define pci_out_supported(q) \ argument
344 (q->irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED)
[all …]
qdio_thinint.c:88 struct qdio_q *q; in tiqdio_remove_input_queues() local
90 q = irq_ptr->input_qs[0]; in tiqdio_remove_input_queues()
92 if (!q || !q->entry.prev || !q->entry.next) in tiqdio_remove_input_queues()
96 list_del_rcu(&q->entry); in tiqdio_remove_input_queues()
147 struct qdio_q *q; in tiqdio_call_inq_handlers() local
150 for_each_input_queue(irq, q, i) { in tiqdio_call_inq_handlers()
153 xchg(q->irq_ptr->dsci, 0); in tiqdio_call_inq_handlers()
155 if (q->u.in.queue_start_poll) { in tiqdio_call_inq_handlers()
158 &q->u.in.queue_irq_state)) { in tiqdio_call_inq_handlers()
159 qperf_inc(q, int_discarded); in tiqdio_call_inq_handlers()
[all …]
/linux-4.4.14/net/sched/
sch_choke.c:80 static unsigned int choke_len(const struct choke_sched_data *q) in choke_len() argument
82 return (q->tail - q->head) & q->tab_mask; in choke_len()
86 static int use_ecn(const struct choke_sched_data *q) in use_ecn() argument
88 return q->flags & TC_RED_ECN; in use_ecn()
92 static int use_harddrop(const struct choke_sched_data *q) in use_harddrop() argument
94 return q->flags & TC_RED_HARDDROP; in use_harddrop()
98 static void choke_zap_head_holes(struct choke_sched_data *q) in choke_zap_head_holes() argument
101 q->head = (q->head + 1) & q->tab_mask; in choke_zap_head_holes()
102 if (q->head == q->tail) in choke_zap_head_holes()
104 } while (q->tab[q->head] == NULL); in choke_zap_head_holes()
[all …]
sch_sfq.c:151 static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index val) in sfq_dep_head() argument
154 return &q->slots[val].dep; in sfq_dep_head()
155 return &q->dep[val - SFQ_MAX_FLOWS]; in sfq_dep_head()
158 static unsigned int sfq_hash(const struct sfq_sched_data *q, in sfq_hash() argument
161 return skb_get_hash_perturb(skb, q->perturbation) & (q->divisor - 1); in sfq_hash()
167 struct sfq_sched_data *q = qdisc_priv(sch); in sfq_classify() local
174 TC_H_MIN(skb->priority) <= q->divisor) in sfq_classify()
177 fl = rcu_dereference_bh(q->filter_list); in sfq_classify()
179 return sfq_hash(q, skb) + 1; in sfq_classify()
193 if (TC_H_MIN(res.classid) <= q->divisor) in sfq_classify()
[all …]
sch_pie.c:100 struct pie_sched_data *q = qdisc_priv(sch); in drop_early() local
102 u32 local_prob = q->vars.prob; in drop_early()
106 if (q->vars.burst_time > 0) in drop_early()
112 if ((q->vars.qdelay < q->params.target / 2) in drop_early()
113 && (q->vars.prob < MAX_PROB / 5)) in drop_early()
125 if (q->params.bytemode && packet_size <= mtu) in drop_early()
128 local_prob = q->vars.prob; in drop_early()
139 struct pie_sched_data *q = qdisc_priv(sch); in pie_qdisc_enqueue() local
143 q->stats.overlimit++; in pie_qdisc_enqueue()
149 } else if (q->params.ecn && (q->vars.prob <= MAX_PROB / 10) && in pie_qdisc_enqueue()
[all …]
sch_sfb.c:125 static void increment_one_qlen(u32 sfbhash, u32 slot, struct sfb_sched_data *q) in increment_one_qlen() argument
128 struct sfb_bucket *b = &q->bins[slot].bins[0][0]; in increment_one_qlen()
140 static void increment_qlen(const struct sk_buff *skb, struct sfb_sched_data *q) in increment_qlen() argument
146 increment_one_qlen(sfbhash, 0, q); in increment_qlen()
150 increment_one_qlen(sfbhash, 1, q); in increment_qlen()
154 struct sfb_sched_data *q) in decrement_one_qlen() argument
157 struct sfb_bucket *b = &q->bins[slot].bins[0][0]; in decrement_one_qlen()
169 static void decrement_qlen(const struct sk_buff *skb, struct sfb_sched_data *q) in decrement_qlen() argument
175 decrement_one_qlen(sfbhash, 0, q); in decrement_qlen()
179 decrement_one_qlen(sfbhash, 1, q); in decrement_qlen()
[all …]
sch_red.c:49 static inline int red_use_ecn(struct red_sched_data *q) in red_use_ecn() argument
51 return q->flags & TC_RED_ECN; in red_use_ecn()
54 static inline int red_use_harddrop(struct red_sched_data *q) in red_use_harddrop() argument
56 return q->flags & TC_RED_HARDDROP; in red_use_harddrop()
61 struct red_sched_data *q = qdisc_priv(sch); in red_enqueue() local
62 struct Qdisc *child = q->qdisc; in red_enqueue()
65 q->vars.qavg = red_calc_qavg(&q->parms, in red_enqueue()
66 &q->vars, in red_enqueue()
69 if (red_is_idling(&q->vars)) in red_enqueue()
70 red_end_of_idle_period(&q->vars); in red_enqueue()
[all …]
sch_netem.c:197 static bool loss_4state(struct netem_sched_data *q) in loss_4state() argument
199 struct clgstate *clg = &q->clg; in loss_4state()
262 static bool loss_gilb_ell(struct netem_sched_data *q) in loss_gilb_ell() argument
264 struct clgstate *clg = &q->clg; in loss_gilb_ell()
283 static bool loss_event(struct netem_sched_data *q) in loss_event() argument
285 switch (q->loss_model) { in loss_event()
288 return q->loss && q->loss >= get_crandom(&q->loss_cor); in loss_event()
296 return loss_4state(q); in loss_event()
304 return loss_gilb_ell(q); in loss_event()
342 static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sched_data *q) in packet_len_2_sched_time() argument
[all …]
sch_fq_codel.c:69 static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q, in fq_codel_hash() argument
72 u32 hash = skb_get_hash_perturb(skb, q->perturbation); in fq_codel_hash()
74 return reciprocal_scale(hash, q->flows_cnt); in fq_codel_hash()
80 struct fq_codel_sched_data *q = qdisc_priv(sch); in fq_codel_classify() local
87 TC_H_MIN(skb->priority) <= q->flows_cnt) in fq_codel_classify()
90 filter = rcu_dereference_bh(q->filter_list); in fq_codel_classify()
92 return fq_codel_hash(q, skb) + 1; in fq_codel_classify()
106 if (TC_H_MIN(res.classid) <= q->flows_cnt) in fq_codel_classify()
138 struct fq_codel_sched_data *q = qdisc_priv(sch); in fq_codel_drop() local
148 for (i = 0; i < q->flows_cnt; i++) { in fq_codel_drop()
[all …]
sch_multiq.c:42 struct multiq_sched_data *q = qdisc_priv(sch); in multiq_classify() local
45 struct tcf_proto *fl = rcu_dereference_bh(q->filter_list); in multiq_classify()
61 if (band >= q->bands) in multiq_classify()
62 return q->queues[0]; in multiq_classify()
64 return q->queues[band]; in multiq_classify()
86 sch->q.qlen++; in multiq_enqueue()
96 struct multiq_sched_data *q = qdisc_priv(sch); in multiq_dequeue() local
101 for (band = 0; band < q->bands; band++) { in multiq_dequeue()
103 q->curband++; in multiq_dequeue()
104 if (q->curband >= q->bands) in multiq_dequeue()
[all …]
sch_fq.c:129 static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f) in fq_flow_set_throttled() argument
131 struct rb_node **p = &q->delayed.rb_node, *parent = NULL; in fq_flow_set_throttled()
144 rb_insert_color(&f->rate_node, &q->delayed); in fq_flow_set_throttled()
145 q->throttled_flows++; in fq_flow_set_throttled()
146 q->stat_throttled++; in fq_flow_set_throttled()
149 if (q->time_next_delayed_flow > f->time_next_packet) in fq_flow_set_throttled()
150 q->time_next_delayed_flow = f->time_next_packet; in fq_flow_set_throttled()
176 static void fq_gc(struct fq_sched_data *q, in fq_gc() argument
205 q->flows -= fcnt; in fq_gc()
206 q->inactive_flows -= fcnt; in fq_gc()
[all …]
sch_tbf.c:160 struct tbf_sched_data *q = qdisc_priv(sch); in tbf_segment() local
177 ret = qdisc_enqueue(segs, q->qdisc); in tbf_segment()
186 sch->q.qlen += nb; in tbf_segment()
195 struct tbf_sched_data *q = qdisc_priv(sch); in tbf_enqueue() local
198 if (qdisc_pkt_len(skb) > q->max_size) { in tbf_enqueue()
199 if (skb_is_gso(skb) && skb_gso_mac_seglen(skb) <= q->max_size) in tbf_enqueue()
203 ret = qdisc_enqueue(skb, q->qdisc); in tbf_enqueue()
210 sch->q.qlen++; in tbf_enqueue()
216 struct tbf_sched_data *q = qdisc_priv(sch); in tbf_drop() local
219 if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) { in tbf_drop()
[all …]
sch_cbq.c:115 struct Qdisc *q; /* Elementary queueing discipline */ member
178 cbq_class_lookup(struct cbq_sched_data *q, u32 classid) in cbq_class_lookup() argument
182 clc = qdisc_class_find(&q->clhash, classid); in cbq_class_lookup()
219 struct cbq_sched_data *q = qdisc_priv(sch); in cbq_classify() local
220 struct cbq_class *head = &q->link; in cbq_classify()
231 (cl = cbq_class_lookup(q, prio)) != NULL) in cbq_classify()
250 cl = cbq_class_lookup(q, res.classid); in cbq_classify()
303 struct cbq_sched_data *q = qdisc_priv(cl->qdisc); in cbq_activate_class() local
307 cl_tail = q->active[prio]; in cbq_activate_class()
308 q->active[prio] = cl; in cbq_activate_class()
[all …]
sch_hhf.c:181 struct hhf_sched_data *q) in seek_list() argument
190 u32 prev = flow->hit_timestamp + q->hhf_evict_timeout; in seek_list()
200 q->hh_flows_current_cnt--; in seek_list()
212 struct hhf_sched_data *q) in alloc_new_hh() argument
220 u32 prev = flow->hit_timestamp + q->hhf_evict_timeout; in alloc_new_hh()
227 if (q->hh_flows_current_cnt >= q->hh_flows_limit) { in alloc_new_hh()
228 q->hh_flows_overlimit++; in alloc_new_hh()
236 q->hh_flows_current_cnt++; in alloc_new_hh()
248 struct hhf_sched_data *q = qdisc_priv(sch); in hhf_classify() local
258 prev = q->hhf_arrays_reset_timestamp + q->hhf_reset_timeout; in hhf_classify()
[all …]
sch_qfq.c:211 struct qfq_sched *q = qdisc_priv(sch); in qfq_find_class() local
214 clc = qdisc_class_find(&q->clhash, classid); in qfq_find_class()
222 unsigned int len = cl->qdisc->q.qlen; in qfq_purge_queue()
265 static void qfq_init_agg(struct qfq_sched *q, struct qfq_aggregate *agg, in qfq_init_agg() argument
269 hlist_add_head(&agg->nonfull_next, &q->nonfull_aggs); in qfq_init_agg()
275 static struct qfq_aggregate *qfq_find_agg(struct qfq_sched *q, in qfq_find_agg() argument
280 hlist_for_each_entry(agg, &q->nonfull_aggs, nonfull_next) in qfq_find_agg()
289 static void qfq_update_agg(struct qfq_sched *q, struct qfq_aggregate *agg, in qfq_update_agg() argument
294 if (new_num_classes == q->max_agg_classes) in qfq_update_agg()
298 new_num_classes == q->max_agg_classes - 1) /* agg no more full */ in qfq_update_agg()
[all …]
sch_gred.c:99 struct gred_sched_data *q = table->tab[i]; in gred_wred_mode_check() local
102 if (q == NULL) in gred_wred_mode_check()
106 if (table->tab[n] && table->tab[n]->prio == q->prio) in gred_wred_mode_check()
114 struct gred_sched_data *q, in gred_backlog() argument
120 return q->backlog; in gred_backlog()
129 struct gred_sched_data *q) in gred_load_wred_set() argument
131 q->vars.qavg = table->wred_set.qavg; in gred_load_wred_set()
132 q->vars.qidlestart = table->wred_set.qidlestart; in gred_load_wred_set()
136 struct gred_sched_data *q) in gred_store_wred_set() argument
138 table->wred_set.qavg = q->vars.qavg; in gred_store_wred_set()
[all …]
sch_plug.c:91 struct plug_sched_data *q = qdisc_priv(sch); in plug_enqueue() local
93 if (likely(sch->qstats.backlog + skb->len <= q->limit)) { in plug_enqueue()
94 if (!q->unplug_indefinite) in plug_enqueue()
95 q->pkts_current_epoch++; in plug_enqueue()
104 struct plug_sched_data *q = qdisc_priv(sch); in plug_dequeue() local
109 if (!q->unplug_indefinite) { in plug_dequeue()
110 if (!q->pkts_to_release) { in plug_dequeue()
117 q->pkts_to_release--; in plug_dequeue()
125 struct plug_sched_data *q = qdisc_priv(sch); in plug_init() local
127 q->pkts_current_epoch = 0; in plug_init()
[all …]
sch_codel.c:69 struct sk_buff *skb = __skb_dequeue(&sch->q); in dequeue()
77 struct codel_sched_data *q = qdisc_priv(sch); in codel_qdisc_dequeue() local
80 skb = codel_dequeue(sch, &q->params, &q->vars, &q->stats, dequeue); in codel_qdisc_dequeue()
85 if (q->stats.drop_count && sch->q.qlen) { in codel_qdisc_dequeue()
86 qdisc_tree_reduce_backlog(sch, q->stats.drop_count, q->stats.drop_len); in codel_qdisc_dequeue()
87 q->stats.drop_count = 0; in codel_qdisc_dequeue()
88 q->stats.drop_len = 0; in codel_qdisc_dequeue()
97 struct codel_sched_data *q; in codel_qdisc_enqueue() local
103 q = qdisc_priv(sch); in codel_qdisc_enqueue()
104 q->drop_overlimit++; in codel_qdisc_enqueue()
[all …]
sch_prio.c:36 struct prio_sched_data *q = qdisc_priv(sch); in prio_classify() local
44 fl = rcu_dereference_bh(q->filter_list); in prio_classify()
58 return q->queues[q->prio2band[band & TC_PRIO_MAX]]; in prio_classify()
63 if (band >= q->bands) in prio_classify()
64 return q->queues[q->prio2band[0]]; in prio_classify()
66 return q->queues[band]; in prio_classify()
88 sch->q.qlen++; in prio_enqueue()
98 struct prio_sched_data *q = qdisc_priv(sch); in prio_peek() local
101 for (prio = 0; prio < q->bands; prio++) { in prio_peek()
102 struct Qdisc *qdisc = q->queues[prio]; in prio_peek()
[all …]
sch_htb.c:131 struct Qdisc *q; member
183 struct htb_sched *q = qdisc_priv(sch); in htb_find() local
186 clc = qdisc_class_find(&q->clhash, handle); in htb_find()
209 struct htb_sched *q = qdisc_priv(sch); in htb_classify() local
228 tcf = rcu_dereference_bh(q->filter_list); in htb_classify()
257 cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch); in htb_classify()
295 static void htb_add_to_wait_tree(struct htb_sched *q, in htb_add_to_wait_tree() argument
298 struct rb_node **p = &q->hlevel[cl->level].wait_pq.rb_node, *parent = NULL; in htb_add_to_wait_tree()
300 cl->pq_key = q->now + delay; in htb_add_to_wait_tree()
301 if (cl->pq_key == q->now) in htb_add_to_wait_tree()
[all …]
sch_teql.c:70 struct sk_buff_head q; member
73 #define NEXT_SLAVE(q) (((struct teql_sched_data *)qdisc_priv(q))->next) argument
83 struct teql_sched_data *q = qdisc_priv(sch); in teql_enqueue() local
85 if (q->q.qlen < dev->tx_queue_len) { in teql_enqueue()
86 __skb_queue_tail(&q->q, skb); in teql_enqueue()
99 struct Qdisc *q; in teql_dequeue() local
101 skb = __skb_dequeue(&dat->q); in teql_dequeue()
103 q = rcu_dereference_bh(dat_queue->qdisc); in teql_dequeue()
106 struct net_device *m = qdisc_dev(q); in teql_dequeue()
114 sch->q.qlen = dat->q.qlen + q->q.qlen; in teql_dequeue()
[all …]
sch_api.c:42 struct nlmsghdr *n, struct Qdisc *q,
142 struct Qdisc_ops *q, **qp; in register_qdisc() local
146 for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next) in register_qdisc()
147 if (!strcmp(qops->id, q->id)) in register_qdisc()
186 struct Qdisc_ops *q, **qp; in unregister_qdisc() local
190 for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next) in unregister_qdisc()
191 if (q == qops) in unregister_qdisc()
193 if (q) { in unregister_qdisc()
194 *qp = q->next; in unregister_qdisc()
195 q->next = NULL; in unregister_qdisc()
[all …]
sch_drr.c:44 struct drr_sched *q = qdisc_priv(sch); in drr_find_class() local
47 clc = qdisc_class_find(&q->clhash, classid); in drr_find_class()
55 unsigned int len = cl->qdisc->q.qlen; in drr_purge_queue()
69 struct drr_sched *q = qdisc_priv(sch); in drr_change_class() local
132 qdisc_class_hash_insert(&q->clhash, &cl->common); in drr_change_class()
135 qdisc_class_hash_grow(sch, &q->clhash); in drr_change_class()
150 struct drr_sched *q = qdisc_priv(sch); in drr_delete_class() local
159 qdisc_class_hash_remove(&q->clhash, &cl->common); in drr_delete_class()
192 struct drr_sched *q = qdisc_priv(sch); in drr_tcf_chain() local
197 return &q->filter_list; in drr_tcf_chain()
[all …]
sch_generic.c:48 static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q) in dev_requeue_skb() argument
50 q->gso_skb = skb; in dev_requeue_skb()
51 q->qstats.requeues++; in dev_requeue_skb()
52 q->q.qlen++; /* it's still part of the queue */ in dev_requeue_skb()
53 __netif_schedule(q); in dev_requeue_skb()
58 static void try_bulk_dequeue_skb(struct Qdisc *q, in try_bulk_dequeue_skb() argument
66 struct sk_buff *nskb = q->dequeue(q); in try_bulk_dequeue_skb()
82 static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate, in dequeue_skb() argument
85 struct sk_buff *skb = q->gso_skb; in dequeue_skb()
86 const struct netdev_queue *txq = q->dev_queue; in dequeue_skb()
[all …]
sch_fifo.c:32 if (likely(skb_queue_len(&sch->q) < sch->limit)) in pfifo_enqueue()
40 if (likely(skb_queue_len(&sch->q) < sch->limit)) in pfifo_tail_enqueue()
44 __qdisc_queue_drop_head(sch, &sch->q); in pfifo_tail_enqueue()
141 int fifo_set_limit(struct Qdisc *q, unsigned int limit) in fifo_set_limit() argument
147 if (strncmp(q->ops->id + 1, "fifo", 4) != 0) in fifo_set_limit()
156 ret = q->ops->change(q, nla); in fifo_set_limit()
166 struct Qdisc *q; in fifo_create_dflt() local
169 q = qdisc_create_dflt(sch->dev_queue, ops, TC_H_MAKE(sch->handle, 1)); in fifo_create_dflt()
170 if (q) { in fifo_create_dflt()
171 err = fifo_set_limit(q, limit); in fifo_create_dflt()
[all …]
sch_hfsc.c:227 eltree_get_mindl(struct hfsc_sched *q, u64 cur_time) in eltree_get_mindl() argument
232 for (n = rb_first(&q->eligible); n != NULL; n = rb_next(n)) { in eltree_get_mindl()
244 eltree_get_minel(struct hfsc_sched *q) in eltree_get_minel() argument
248 n = rb_first(&q->eligible); in eltree_get_minel()
770 if (cl->qdisc->q.qlen == 0 && cl->cl_flags & HFSC_FSC) in update_vf()
897 unsigned int len = cl->qdisc->q.qlen; in hfsc_purge_queue()
923 struct hfsc_sched *q = qdisc_priv(sch); in hfsc_find_class() local
926 clc = qdisc_class_find(&q->clhash, classid); in hfsc_find_class()
973 struct hfsc_sched *q = qdisc_priv(sch); in hfsc_change_class() local
1036 if (cl->qdisc->q.qlen != 0) { in hfsc_change_class()
[all …]
sch_atm.c:43 struct Qdisc *q; /* FIFO, TBF, etc. */ member
94 *old = flow->q; in atm_tc_graft()
95 flow->q = new; in atm_tc_graft()
106 return flow ? flow->q : NULL; in atm_tc_leaf()
143 pr_debug("atm_tc_put: qdisc %p\n", flow->q); in atm_tc_put()
144 qdisc_destroy(flow->q); in atm_tc_put()
277 flow->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid); in atm_tc_change()
278 if (!flow->q) in atm_tc_change()
279 flow->q = &noop_qdisc; in atm_tc_change()
280 pr_debug("atm_tc_change: qdisc %p\n", flow->q); in atm_tc_change()
[all …]
sch_dsmark.c:44 struct Qdisc *q; member
76 *old = qdisc_replace(sch, new, &p->q); in dsmark_graft()
83 return p->q; in dsmark_leaf()
254 err = qdisc_enqueue(skb, p->q); in dsmark_enqueue()
262 sch->q.qlen++; in dsmark_enqueue()
279 skb = p->q->ops->dequeue(p->q); in dsmark_dequeue()
285 sch->q.qlen--; in dsmark_dequeue()
320 return p->q->ops->peek(p->q); in dsmark_peek()
330 if (p->q->ops->drop == NULL) in dsmark_drop()
333 len = p->q->ops->drop(p->q); in dsmark_drop()
[all …]
/linux-4.4.14/drivers/mtd/spi-nor/
fsl-quadspi.c:282 static inline int needs_swap_endian(struct fsl_qspi *q) in needs_swap_endian() argument
284 return q->devtype_data->driver_data & QUADSPI_QUIRK_SWAP_ENDIAN; in needs_swap_endian()
287 static inline int needs_4x_clock(struct fsl_qspi *q) in needs_4x_clock() argument
289 return q->devtype_data->driver_data & QUADSPI_QUIRK_4X_INT_CLK; in needs_4x_clock()
292 static inline int needs_fill_txfifo(struct fsl_qspi *q) in needs_fill_txfifo() argument
294 return q->devtype_data->driver_data & QUADSPI_QUIRK_TKT253890; in needs_fill_txfifo()
297 static inline int needs_wakeup_wait_mode(struct fsl_qspi *q) in needs_wakeup_wait_mode() argument
299 return q->devtype_data->driver_data & QUADSPI_QUIRK_TKT245618; in needs_wakeup_wait_mode()
306 static inline u32 fsl_qspi_endian_xchg(struct fsl_qspi *q, u32 a) in fsl_qspi_endian_xchg() argument
308 return needs_swap_endian(q) ? __swab32(a) : a; in fsl_qspi_endian_xchg()
[all …]
/linux-4.4.14/block/
blk-core.c:75 if (rl == &rl->q->root_rl) in blk_clear_congested()
76 clear_wb_congested(rl->q->backing_dev_info.wb.congested, sync); in blk_clear_congested()
86 if (rl == &rl->q->root_rl) in blk_set_congested()
87 set_wb_congested(rl->q->backing_dev_info.wb.congested, sync); in blk_set_congested()
91 void blk_queue_congestion_threshold(struct request_queue *q) in blk_queue_congestion_threshold() argument
95 nr = q->nr_requests - (q->nr_requests / 8) + 1; in blk_queue_congestion_threshold()
96 if (nr > q->nr_requests) in blk_queue_congestion_threshold()
97 nr = q->nr_requests; in blk_queue_congestion_threshold()
98 q->nr_congestion_on = nr; in blk_queue_congestion_threshold()
100 nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1; in blk_queue_congestion_threshold()
[all …]
blk-sysfs.c:44 static ssize_t queue_requests_show(struct request_queue *q, char *page) in queue_requests_show() argument
46 return queue_var_show(q->nr_requests, (page)); in queue_requests_show()
50 queue_requests_store(struct request_queue *q, const char *page, size_t count) in queue_requests_store() argument
55 if (!q->request_fn && !q->mq_ops) in queue_requests_store()
65 if (q->request_fn) in queue_requests_store()
66 err = blk_update_nr_requests(q, nr); in queue_requests_store()
68 err = blk_mq_update_nr_requests(q, nr); in queue_requests_store()
76 static ssize_t queue_ra_show(struct request_queue *q, char *page) in queue_ra_show() argument
78 unsigned long ra_kb = q->backing_dev_info.ra_pages << in queue_ra_show()
85 queue_ra_store(struct request_queue *q, const char *page, size_t count) in queue_ra_store() argument
[all …]
elevator.c:58 struct request_queue *q = rq->q; in elv_iosched_allow_merge() local
59 struct elevator_queue *e = q->elevator; in elv_iosched_allow_merge()
62 return e->type->ops.elevator_allow_merge_fn(q, rq, bio); in elv_iosched_allow_merge()
153 struct elevator_queue *elevator_alloc(struct request_queue *q, in elevator_alloc() argument
158 eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node); in elevator_alloc()
180 int elevator_init(struct request_queue *q, char *name) in elevator_init() argument
189 lockdep_assert_held(&q->sysfs_lock); in elevator_init()
191 if (unlikely(q->elevator)) in elevator_init()
194 INIT_LIST_HEAD(&q->queue_head); in elevator_init()
195 q->last_merge = NULL; in elevator_init()
[all …]
blk.h:40 struct request_queue *q, struct blk_mq_ctx *ctx) in blk_get_flush_queue() argument
44 if (!q->mq_ops) in blk_get_flush_queue()
45 return q->fq; in blk_get_flush_queue()
47 hctx = q->mq_ops->map_queue(q, ctx->cpu); in blk_get_flush_queue()
52 static inline void __blk_get_queue(struct request_queue *q) in __blk_get_queue() argument
54 kobject_get(&q->kobj); in __blk_get_queue()
57 struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
59 void blk_free_flush_queue(struct blk_flush_queue *q);
61 int blk_init_rl(struct request_list *rl, struct request_queue *q,
65 void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
[all …]
blk-settings.c:33 void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn) in blk_queue_prep_rq() argument
35 q->prep_rq_fn = pfn; in blk_queue_prep_rq()
50 void blk_queue_unprep_rq(struct request_queue *q, unprep_rq_fn *ufn) in blk_queue_unprep_rq() argument
52 q->unprep_rq_fn = ufn; in blk_queue_unprep_rq()
56 void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn) in blk_queue_softirq_done() argument
58 q->softirq_done_fn = fn; in blk_queue_softirq_done()
62 void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout) in blk_queue_rq_timeout() argument
64 q->rq_timeout = timeout; in blk_queue_rq_timeout()
68 void blk_queue_rq_timed_out(struct request_queue *q, rq_timed_out_fn *fn) in blk_queue_rq_timed_out() argument
70 q->rq_timed_out_fn = fn; in blk_queue_rq_timed_out()
[all …]
blk-merge.c:12 static struct bio *blk_bio_discard_split(struct request_queue *q, in blk_bio_discard_split() argument
25 granularity = max(q->limits.discard_granularity >> 9, 1U); in blk_bio_discard_split()
27 max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9); in blk_bio_discard_split()
44 alignment = (q->limits.discard_alignment >> 9) % granularity; in blk_bio_discard_split()
55 static struct bio *blk_bio_write_same_split(struct request_queue *q, in blk_bio_write_same_split() argument
62 if (!q->limits.max_write_same_sectors) in blk_bio_write_same_split()
65 if (bio_sectors(bio) <= q->limits.max_write_same_sectors) in blk_bio_write_same_split()
68 return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs); in blk_bio_write_same_split()
71 static inline unsigned get_max_io_size(struct request_queue *q, in get_max_io_size() argument
74 unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector); in get_max_io_size()
[all …]
blk-mq.c:81 void blk_mq_freeze_queue_start(struct request_queue *q) in blk_mq_freeze_queue_start() argument
85 freeze_depth = atomic_inc_return(&q->mq_freeze_depth); in blk_mq_freeze_queue_start()
87 percpu_ref_kill(&q->q_usage_counter); in blk_mq_freeze_queue_start()
88 blk_mq_run_hw_queues(q, false); in blk_mq_freeze_queue_start()
93 static void blk_mq_freeze_queue_wait(struct request_queue *q) in blk_mq_freeze_queue_wait() argument
95 wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter)); in blk_mq_freeze_queue_wait()
102 void blk_freeze_queue(struct request_queue *q) in blk_freeze_queue() argument
111 blk_mq_freeze_queue_start(q); in blk_freeze_queue()
112 blk_mq_freeze_queue_wait(q); in blk_freeze_queue()
115 void blk_mq_freeze_queue(struct request_queue *q) in blk_mq_freeze_queue() argument
[all …]
blk-flush.c:95 static bool blk_kick_flush(struct request_queue *q,
135 if (rq->q->mq_ops) { in blk_flush_queue_rq()
136 struct request_queue *q = rq->q; in blk_flush_queue_rq() local
139 blk_mq_kick_requeue_list(q); in blk_flush_queue_rq()
143 list_add(&rq->queuelist, &rq->q->queue_head); in blk_flush_queue_rq()
145 list_add_tail(&rq->queuelist, &rq->q->queue_head); in blk_flush_queue_rq()
170 struct request_queue *q = rq->q; in blk_flush_complete_seq() local
206 if (q->mq_ops) in blk_flush_complete_seq()
216 kicked = blk_kick_flush(q, fq); in blk_flush_complete_seq()
222 struct request_queue *q = flush_rq->q; in flush_end_io() local
[all …]
blk-timeout.c:22 int blk_should_fake_timeout(struct request_queue *q) in blk_should_fake_timeout() argument
24 if (!test_bit(QUEUE_FLAG_FAIL_IO, &q->queue_flags)) in blk_should_fake_timeout()
56 struct request_queue *q = disk->queue; in part_timeout_store() local
60 spin_lock_irq(q->queue_lock); in part_timeout_store()
62 queue_flag_set(QUEUE_FLAG_FAIL_IO, q); in part_timeout_store()
64 queue_flag_clear(QUEUE_FLAG_FAIL_IO, q); in part_timeout_store()
65 spin_unlock_irq(q->queue_lock); in part_timeout_store()
85 struct request_queue *q = req->q; in blk_rq_timed_out() local
88 if (q->rq_timed_out_fn) in blk_rq_timed_out()
89 ret = q->rq_timed_out_fn(req); in blk_rq_timed_out()
[all …]
blk-tag.c:23 struct request *blk_queue_find_tag(struct request_queue *q, int tag) in blk_queue_find_tag() argument
25 return blk_map_queue_find_tag(q->queue_tags, tag); in blk_queue_find_tag()
61 void __blk_queue_free_tags(struct request_queue *q) in __blk_queue_free_tags() argument
63 struct blk_queue_tag *bqt = q->queue_tags; in __blk_queue_free_tags()
70 q->queue_tags = NULL; in __blk_queue_free_tags()
71 queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q); in __blk_queue_free_tags()
82 void blk_queue_free_tags(struct request_queue *q) in blk_queue_free_tags() argument
84 queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q); in blk_queue_free_tags()
89 init_tag_map(struct request_queue *q, struct blk_queue_tag *tags, int depth) in init_tag_map() argument
95 if (q && depth > q->nr_requests * 2) { in init_tag_map()
[all …]
blk-map.c:12 static bool iovec_gap_to_prv(struct request_queue *q, in iovec_gap_to_prv() argument
17 if (!queue_virt_boundary(q)) in iovec_gap_to_prv()
26 return (((unsigned long)cur->iov_base & queue_virt_boundary(q)) || in iovec_gap_to_prv()
27 prev_end & queue_virt_boundary(q)); in iovec_gap_to_prv()
30 int blk_rq_append_bio(struct request_queue *q, struct request *rq, in blk_rq_append_bio() argument
34 blk_rq_bio_prep(q, rq, bio); in blk_rq_append_bio()
35 else if (!ll_back_merge_fn(q, rq, bio)) in blk_rq_append_bio()
81 int blk_rq_map_user_iov(struct request_queue *q, struct request *rq, in blk_rq_map_user_iov() argument
102 if ((uaddr & queue_dma_alignment(q)) || in blk_rq_map_user_iov()
103 iovec_gap_to_prv(q, &prv, &iov)) in blk_rq_map_user_iov()
[all …]
blk-ioc.c:41 struct elevator_type *et = icq->q->elevator->type; in ioc_exit_icq()
56 struct request_queue *q = icq->q; in ioc_destroy_icq() local
57 struct elevator_type *et = q->elevator->type; in ioc_destroy_icq()
60 lockdep_assert_held(q->queue_lock); in ioc_destroy_icq()
62 radix_tree_delete(&ioc->icq_tree, icq->q->id); in ioc_destroy_icq()
105 struct request_queue *q = icq->q; in ioc_release_fn() local
107 if (spin_trylock(q->queue_lock)) { in ioc_release_fn()
109 spin_unlock(q->queue_lock); in ioc_release_fn()
185 if (spin_trylock(icq->q->queue_lock)) { in put_io_context_active()
187 spin_unlock(icq->q->queue_lock); in put_io_context_active()
[all …]
bsg-lib.c:100 buf->sg_cnt = blk_rq_map_sg(req->q, req, buf->sg_list); in bsg_map_buffer()
113 struct request_queue *q = req->q; in bsg_create_job() local
119 job = kzalloc(sizeof(struct bsg_job) + q->bsg_job_size, GFP_KERNEL); in bsg_create_job()
125 if (q->bsg_job_size) in bsg_create_job()
163 void bsg_request_fn(struct request_queue *q) in bsg_request_fn() argument
165 struct device *dev = q->queuedata; in bsg_request_fn()
174 req = blk_fetch_request(q); in bsg_request_fn()
177 spin_unlock_irq(q->queue_lock); in bsg_request_fn()
183 spin_lock_irq(q->queue_lock); in bsg_request_fn()
188 ret = q->bsg_job_fn(job); in bsg_request_fn()
[all …]
blk-cgroup.c:52 static bool blkcg_policy_enabled(struct request_queue *q, in blkcg_policy_enabled() argument
55 return pol && test_bit(pol->plid, q->blkcg_pols); in blkcg_policy_enabled()
91 static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q, in blkg_alloc() argument
98 blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node); in blkg_alloc()
106 blkg->q = q; in blkg_alloc()
113 if (blk_init_rl(&blkg->rl, q, gfp_mask)) in blkg_alloc()
122 if (!blkcg_policy_enabled(q, pol)) in blkg_alloc()
126 pd = pol->pd_alloc_fn(gfp_mask, q->node); in blkg_alloc()
143 struct request_queue *q, bool update_hint) in blkg_lookup_slowpath() argument
153 blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id); in blkg_lookup_slowpath()
[all …]
blk-mq-sysfs.c:37 struct request_queue *q; in blk_mq_sysfs_show() local
42 q = ctx->queue; in blk_mq_sysfs_show()
48 mutex_lock(&q->sysfs_lock); in blk_mq_sysfs_show()
49 if (!blk_queue_dying(q)) in blk_mq_sysfs_show()
51 mutex_unlock(&q->sysfs_lock); in blk_mq_sysfs_show()
60 struct request_queue *q; in blk_mq_sysfs_store() local
65 q = ctx->queue; in blk_mq_sysfs_store()
71 mutex_lock(&q->sysfs_lock); in blk_mq_sysfs_store()
72 if (!blk_queue_dying(q)) in blk_mq_sysfs_store()
74 mutex_unlock(&q->sysfs_lock); in blk_mq_sysfs_store()
[all …]
blk-mq.h:29 void blk_mq_freeze_queue(struct request_queue *q);
30 void blk_mq_free_queue(struct request_queue *q);
31 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
32 void blk_mq_wake_waiters(struct request_queue *q);
58 extern int blk_mq_sysfs_register(struct request_queue *q);
59 extern void blk_mq_sysfs_unregister(struct request_queue *q);
63 void blk_mq_release(struct request_queue *q);
74 static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q, in __blk_mq_get_ctx() argument
77 return per_cpu_ptr(q->queue_ctx, cpu); in __blk_mq_get_ctx()
86 static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q) in blk_mq_get_ctx() argument
[all …]
noop-iosched.c:15 static void noop_merged_requests(struct request_queue *q, struct request *rq, in noop_merged_requests() argument
21 static int noop_dispatch(struct request_queue *q, int force) in noop_dispatch() argument
23 struct noop_data *nd = q->elevator->elevator_data; in noop_dispatch()
29 elv_dispatch_sort(q, rq); in noop_dispatch()
35 static void noop_add_request(struct request_queue *q, struct request *rq) in noop_add_request() argument
37 struct noop_data *nd = q->elevator->elevator_data; in noop_add_request()
43 noop_former_request(struct request_queue *q, struct request *rq) in noop_former_request() argument
45 struct noop_data *nd = q->elevator->elevator_data; in noop_former_request()
53 noop_latter_request(struct request_queue *q, struct request *rq) in noop_latter_request() argument
55 struct noop_data *nd = q->elevator->elevator_data; in noop_latter_request()
[all …]
scsi_ioctl.c:60 static int scsi_get_idlun(struct request_queue *q, int __user *p) in scsi_get_idlun() argument
65 static int scsi_get_bus(struct request_queue *q, int __user *p) in scsi_get_bus() argument
70 static int sg_get_timeout(struct request_queue *q) in sg_get_timeout() argument
72 return jiffies_to_clock_t(q->sg_timeout); in sg_get_timeout()
75 static int sg_set_timeout(struct request_queue *q, int __user *p) in sg_set_timeout() argument
80 q->sg_timeout = clock_t_to_jiffies(timeout); in sg_set_timeout()
85 static int max_sectors_bytes(struct request_queue *q) in max_sectors_bytes() argument
87 unsigned int max_sectors = queue_max_sectors(q); in max_sectors_bytes()
94 static int sg_get_reserved_size(struct request_queue *q, int __user *p) in sg_get_reserved_size() argument
96 int val = min_t(int, q->sg_reserved_size, max_sectors_bytes(q)); in sg_get_reserved_size()
[all …]
blk-exec.c:51 void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk, in blk_execute_rq_nowait() argument
67 if (q->mq_ops) { in blk_execute_rq_nowait()
72 spin_lock_irq(q->queue_lock); in blk_execute_rq_nowait()
74 if (unlikely(blk_queue_dying(q))) { in blk_execute_rq_nowait()
78 spin_unlock_irq(q->queue_lock); in blk_execute_rq_nowait()
82 __elv_add_request(q, rq, where); in blk_execute_rq_nowait()
83 __blk_run_queue(q); in blk_execute_rq_nowait()
84 spin_unlock_irq(q->queue_lock); in blk_execute_rq_nowait()
99 int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk, in blk_execute_rq() argument
114 blk_execute_rq_nowait(q, bd_disk, rq, at_head, blk_end_sync_rq); in blk_execute_rq()
deadline-iosched.c:99 deadline_add_request(struct request_queue *q, struct request *rq) in deadline_add_request() argument
101 struct deadline_data *dd = q->elevator->elevator_data; in deadline_add_request()
116 static void deadline_remove_request(struct request_queue *q, struct request *rq) in deadline_remove_request() argument
118 struct deadline_data *dd = q->elevator->elevator_data; in deadline_remove_request()
125 deadline_merge(struct request_queue *q, struct request **req, struct bio *bio) in deadline_merge() argument
127 struct deadline_data *dd = q->elevator->elevator_data; in deadline_merge()
154 static void deadline_merged_request(struct request_queue *q, in deadline_merged_request() argument
157 struct deadline_data *dd = q->elevator->elevator_data; in deadline_merged_request()
169 deadline_merged_requests(struct request_queue *q, struct request *req, in deadline_merged_requests() argument
186 deadline_remove_request(q, next); in deadline_merged_requests()
[all …]
bounce.c:182 static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig, in __blk_queue_bounce() argument
192 if (page_to_pfn(from.bv_page) > queue_bounce_pfn(q)) in __blk_queue_bounce()
202 if (page_to_pfn(page) <= queue_bounce_pfn(q)) in __blk_queue_bounce()
205 to->bv_page = mempool_alloc(pool, q->bounce_gfp); in __blk_queue_bounce()
220 trace_block_bio_bounce(q, *bio_orig); in __blk_queue_bounce()
238 void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig) in blk_queue_bounce() argument
253 if (!(q->bounce_gfp & GFP_DMA)) { in blk_queue_bounce()
254 if (queue_bounce_pfn(q) >= blk_max_pfn) in blk_queue_bounce()
265 __blk_queue_bounce(q, bio_orig, pool); in blk_queue_bounce()
blk-lib.c:44 struct request_queue *q = bdev_get_queue(bdev); in blkdev_issue_discard() local
53 if (!q) in blkdev_issue_discard()
56 if (!blk_queue_discard(q)) in blkdev_issue_discard()
60 granularity = max(q->limits.discard_granularity >> 9, 1U); in blkdev_issue_discard()
64 if (!blk_queue_secdiscard(q)) in blkdev_issue_discard()
149 struct request_queue *q = bdev_get_queue(bdev); in blkdev_issue_write_same() local
155 if (!q) in blkdev_issue_write_same()
288 struct request_queue *q = bdev_get_queue(bdev); in blkdev_issue_zeroout() local
290 if (discard && blk_queue_discard(q) && q->limits.discard_zeroes_data && in blkdev_issue_zeroout()
/linux-4.4.14/drivers/gpu/drm/amd/amdkfd/
kfd_queue.c:27 void print_queue_properties(struct queue_properties *q) in print_queue_properties() argument
29 if (!q) in print_queue_properties()
33 pr_debug("Queue Type: %u\n", q->type); in print_queue_properties()
34 pr_debug("Queue Size: %llu\n", q->queue_size); in print_queue_properties()
35 pr_debug("Queue percent: %u\n", q->queue_percent); in print_queue_properties()
36 pr_debug("Queue Address: 0x%llX\n", q->queue_address); in print_queue_properties()
37 pr_debug("Queue Id: %u\n", q->queue_id); in print_queue_properties()
38 pr_debug("Queue Process Vmid: %u\n", q->vmid); in print_queue_properties()
39 pr_debug("Queue Read Pointer: 0x%p\n", q->read_ptr); in print_queue_properties()
40 pr_debug("Queue Write Pointer: 0x%p\n", q->write_ptr); in print_queue_properties()
[all …]
kfd_mqd_manager_cik.c:39 struct queue_properties *q) in init_mqd() argument
45 BUG_ON(!mm || !q || !mqd); in init_mqd()
98 if (q->format == KFD_QUEUE_FORMAT_AQL) in init_mqd()
104 retval = mm->update_mqd(mm, m, q); in init_mqd()
111 struct queue_properties *q) in init_mqd_sdma() argument
133 retval = mm->update_mqd(mm, m, q); in init_mqd_sdma()
167 struct queue_properties *q) in update_mqd() argument
171 BUG_ON(!mm || !q || !mqd); in update_mqd()
183 m->cp_hqd_pq_control |= ffs(q->queue_size / sizeof(unsigned int)) in update_mqd()
185 m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8); in update_mqd()
[all …]
kfd_device_queue_manager.c:44 struct queue *q,
52 struct queue *q,
96 struct queue *q) in allocate_vmid() argument
110 q->properties.vmid = allocated_vmid; in allocate_vmid()
112 set_pasid_vmid_mapping(dqm, q->process->pasid, q->properties.vmid); in allocate_vmid()
120 struct queue *q) in deallocate_vmid() argument
129 q->properties.vmid = 0; in deallocate_vmid()
133 struct queue *q, in create_queue_nocpsch() argument
139 BUG_ON(!dqm || !q || !qpd || !allocated_vmid); in create_queue_nocpsch()
142 print_queue(q); in create_queue_nocpsch()
[all …]
kfd_mqd_manager_vi.c:41 struct queue_properties *q) in init_mqd() argument
82 if (q->format == KFD_QUEUE_FORMAT_AQL) in init_mqd()
88 retval = mm->update_mqd(mm, m, q); in init_mqd()
102 struct queue_properties *q, unsigned int mtype, in __update_mqd() argument
107 BUG_ON(!mm || !q || !mqd); in __update_mqd()
117 ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1; in __update_mqd()
120 m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8); in __update_mqd()
121 m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8); in __update_mqd()
123 m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr); in __update_mqd()
124 m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr); in __update_mqd()
[all …]
kfd_process_queue_manager.c:38 if (pqn->q && pqn->q->properties.queue_id == qid) in get_queue_by_qid()
100 (pqn->q != NULL) ? in pqm_uninit()
101 pqn->q->properties.queue_id : in pqm_uninit()
114 struct kfd_dev *dev, struct queue **q, in create_cp_queue() argument
132 retval = init_queue(q, *q_properties); in create_cp_queue()
136 (*q)->device = dev; in create_cp_queue()
137 (*q)->process = pqm->process; in create_cp_queue()
158 struct queue *q; in pqm_create_queue() local
168 q = NULL; in pqm_create_queue()
218 retval = create_cp_queue(pqm, dev, &q, &q_properties, f, *qid); in pqm_create_queue()
[all …]
kfd_packet_manager.c:186 struct queue *q, bool is_static) in pm_create_map_queue_vi() argument
191 BUG_ON(!pm || !buffer || !q); in pm_create_map_queue_vi()
211 switch (q->properties.type) { in pm_create_map_queue_vi()
228 q->properties.type); in pm_create_map_queue_vi()
233 q->properties.doorbell_off; in pm_create_map_queue_vi()
236 lower_32_bits(q->gart_mqd_addr); in pm_create_map_queue_vi()
239 upper_32_bits(q->gart_mqd_addr); in pm_create_map_queue_vi()
242 lower_32_bits((uint64_t)q->properties.write_ptr); in pm_create_map_queue_vi()
245 upper_32_bits((uint64_t)q->properties.write_ptr); in pm_create_map_queue_vi()
251 struct queue *q, bool is_static) in pm_create_map_queue() argument
[all …]
/linux-4.4.14/drivers/net/wireless/b43/
pio.c:37 static u16 generate_cookie(struct b43_pio_txqueue *q, in generate_cookie() argument
50 cookie = (((u16)q->index + 1) << 12); in generate_cookie()
62 struct b43_pio_txqueue *q = NULL; in parse_cookie() local
67 q = pio->tx_queue_AC_BK; in parse_cookie()
70 q = pio->tx_queue_AC_BE; in parse_cookie()
73 q = pio->tx_queue_AC_VI; in parse_cookie()
76 q = pio->tx_queue_AC_VO; in parse_cookie()
79 q = pio->tx_queue_mcast; in parse_cookie()
82 if (B43_WARN_ON(!q)) in parse_cookie()
85 if (B43_WARN_ON(pack_index >= ARRAY_SIZE(q->packets))) in parse_cookie()
[all …]
pio.h:108 static inline u16 b43_piotx_read16(struct b43_pio_txqueue *q, u16 offset) in b43_piotx_read16() argument
110 return b43_read16(q->dev, q->mmio_base + offset); in b43_piotx_read16()
113 static inline u32 b43_piotx_read32(struct b43_pio_txqueue *q, u16 offset) in b43_piotx_read32() argument
115 return b43_read32(q->dev, q->mmio_base + offset); in b43_piotx_read32()
118 static inline void b43_piotx_write16(struct b43_pio_txqueue *q, in b43_piotx_write16() argument
121 b43_write16(q->dev, q->mmio_base + offset, value); in b43_piotx_write16()
124 static inline void b43_piotx_write32(struct b43_pio_txqueue *q, in b43_piotx_write32() argument
127 b43_write32(q->dev, q->mmio_base + offset, value); in b43_piotx_write32()
131 static inline u16 b43_piorx_read16(struct b43_pio_rxqueue *q, u16 offset) in b43_piorx_read16() argument
133 return b43_read16(q->dev, q->mmio_base + offset); in b43_piorx_read16()
[all …]
/linux-4.4.14/arch/sh/kernel/cpu/sh5/
Dswitchto.S66 st.q r0, ( 9*8), r9
67 st.q r0, (10*8), r10
68 st.q r0, (11*8), r11
69 st.q r0, (12*8), r12
70 st.q r0, (13*8), r13
71 st.q r0, (14*8), r14 ! for unwind, want to look as though we took a trap at
74 st.q r0, (16*8), r16
76 st.q r0, (24*8), r24
77 st.q r0, (25*8), r25
78 st.q r0, (26*8), r26
[all …]
Dentry.S261 st.q SP, SAVED_R2, r2
262 st.q SP, SAVED_R3, r3
263 st.q SP, SAVED_R4, r4
264 st.q SP, SAVED_R5, r5
265 st.q SP, SAVED_R6, r6
266 st.q SP, SAVED_R18, r18
268 st.q SP, SAVED_TR0, r3
302 st.q SP, TLB_SAVED_R0 , r0
303 st.q SP, TLB_SAVED_R1 , r1
304 st.q SP, SAVED_R2 , r2
[all …]
/linux-4.4.14/drivers/net/ethernet/chelsio/cxgb3/
Dsge.c167 static inline struct sge_qset *fl_to_qset(const struct sge_fl *q, int qidx) in fl_to_qset() argument
169 return container_of(q, struct sge_qset, fl[qidx]); in fl_to_qset()
172 static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q) in rspq_to_qset() argument
174 return container_of(q, struct sge_qset, rspq); in rspq_to_qset()
177 static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx) in txq_to_qset() argument
179 return container_of(q, struct sge_qset, txq[qidx]); in txq_to_qset()
192 const struct sge_rspq *q, unsigned int credits) in refill_rspq() argument
196 V_RSPQ(q->cntxt_id) | V_CREDITS(credits)); in refill_rspq()
236 static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q, in unmap_skb() argument
240 struct tx_sw_desc *d = &q->sdesc[cidx]; in unmap_skb()
[all …]
/linux-4.4.14/drivers/net/ethernet/chelsio/cxgb4/
Dsge.c217 static inline unsigned int txq_avail(const struct sge_txq *q) in txq_avail() argument
219 return q->size - 1 - q->in_use; in txq_avail()
312 const struct ulptx_sgl *sgl, const struct sge_txq *q) in unmap_sgl() argument
331 if (likely((u8 *)(p + 1) <= (u8 *)q->stat)) { in unmap_sgl()
337 } else if ((u8 *)p == (u8 *)q->stat) { in unmap_sgl()
338 p = (const struct ulptx_sge_pair *)q->desc; in unmap_sgl()
340 } else if ((u8 *)p + 8 == (u8 *)q->stat) { in unmap_sgl()
341 const __be64 *addr = (const __be64 *)q->desc; in unmap_sgl()
349 const __be64 *addr = (const __be64 *)q->desc; in unmap_sgl()
361 if ((u8 *)p == (u8 *)q->stat) in unmap_sgl()
[all …]
Dcxgb4.h506 typedef int (*rspq_handler_t)(struct sge_rspq *q, const __be64 *rsp,
605 struct sge_txq q; member
617 struct sge_txq q; member
626 struct sge_txq q; member
972 static inline void cxgb_busy_poll_init_lock(struct sge_rspq *q) in cxgb_busy_poll_init_lock() argument
974 spin_lock_init(&q->bpoll_lock); in cxgb_busy_poll_init_lock()
975 q->bpoll_state = CXGB_POLL_STATE_IDLE; in cxgb_busy_poll_init_lock()
978 static inline bool cxgb_poll_lock_napi(struct sge_rspq *q) in cxgb_poll_lock_napi() argument
982 spin_lock(&q->bpoll_lock); in cxgb_poll_lock_napi()
983 if (q->bpoll_state & CXGB_POLL_LOCKED) { in cxgb_poll_lock_napi()
[all …]
/linux-4.4.14/include/linux/
Dblkdev.h56 struct request_queue *q; /* the queue this rl belongs to */ member
95 struct request_queue *q; member
211 typedef void (request_fn_proc) (struct request_queue *q);
212 typedef blk_qc_t (make_request_fn) (struct request_queue *q, struct bio *bio);
219 typedef int (lld_busy_fn) (struct request_queue *q);
502 static inline void queue_lockdep_assert_held(struct request_queue *q) in queue_lockdep_assert_held() argument
504 if (q->queue_lock) in queue_lockdep_assert_held()
505 lockdep_assert_held(q->queue_lock); in queue_lockdep_assert_held()
509 struct request_queue *q) in queue_flag_set_unlocked() argument
511 __set_bit(flag, &q->queue_flags); in queue_flag_set_unlocked()
[all …]
Dblk-cgroup.h107 struct request_queue *q; member
173 struct request_queue *q, bool update_hint);
175 struct request_queue *q);
176 int blkcg_init_queue(struct request_queue *q);
177 void blkcg_drain_queue(struct request_queue *q);
178 void blkcg_exit_queue(struct request_queue *q);
183 int blkcg_activate_policy(struct request_queue *q,
185 void blkcg_deactivate_policy(struct request_queue *q,
267 struct request_queue *q, in __blkg_lookup() argument
273 return q->root_blkg; in __blkg_lookup()
[all …]
Dblktrace_api.h33 extern int do_blk_trace_setup(struct request_queue *q, char *name,
52 #define blk_add_trace_msg(q, fmt, ...) \ argument
54 struct blk_trace *bt = (q)->blk_trace; \
60 extern void blk_add_driver_data(struct request_queue *q, struct request *rq,
62 extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
65 extern int blk_trace_startstop(struct request_queue *q, int start);
66 extern int blk_trace_remove(struct request_queue *q);
74 # define blk_trace_shutdown(q) do { } while (0) argument
75 # define do_blk_trace_setup(q, name, dev, bdev, buts) (-ENOTTY) argument
76 # define blk_add_driver_data(q, rq, data, len) do {} while (0) argument
[all …]
Dblk-mq.h178 struct request_queue *q);
191 struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
223 void blk_mq_cancel_requeue_work(struct request_queue *q);
224 void blk_mq_kick_requeue_list(struct request_queue *q);
225 void blk_mq_abort_requeue_list(struct request_queue *q);
230 void blk_mq_stop_hw_queues(struct request_queue *q);
231 void blk_mq_start_hw_queues(struct request_queue *q);
232 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
233 void blk_mq_run_hw_queues(struct request_queue *q, bool async);
237 void blk_mq_freeze_queue(struct request_queue *q);
[all …]
Dquicklist.h35 struct quicklist *q; in quicklist_alloc() local
38 q =&get_cpu_var(quicklist)[nr]; in quicklist_alloc()
39 p = q->page; in quicklist_alloc()
41 q->page = p[0]; in quicklist_alloc()
43 q->nr_pages--; in quicklist_alloc()
58 struct quicklist *q; in __quicklist_free() local
60 q = &get_cpu_var(quicklist)[nr]; in __quicklist_free()
61 *(void **)p = q->page; in __quicklist_free()
62 q->page = p; in __quicklist_free()
63 q->nr_pages++; in __quicklist_free()
Dwait.h72 extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *);
74 #define init_waitqueue_head(q) \ argument
78 __init_waitqueue_head((q), #q, &__key); \
90 static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p) in init_waitqueue_entry() argument
92 q->flags = 0; in init_waitqueue_entry()
93 q->private = p; in init_waitqueue_entry()
94 q->func = default_wake_function; in init_waitqueue_entry()
98 init_waitqueue_func_entry(wait_queue_t *q, wait_queue_func_t func) in init_waitqueue_func_entry() argument
100 q->flags = 0; in init_waitqueue_func_entry()
101 q->private = NULL; in init_waitqueue_func_entry()
[all …]
/linux-4.4.14/include/media/
Dvideobuf-core.h53 struct videobuf_queue *q; member
106 int (*buf_setup)(struct videobuf_queue *q,
108 int (*buf_prepare)(struct videobuf_queue *q,
111 void (*buf_queue)(struct videobuf_queue *q,
113 void (*buf_release)(struct videobuf_queue *q,
125 int (*iolock) (struct videobuf_queue *q,
128 int (*sync) (struct videobuf_queue *q,
130 int (*mmap_mapper) (struct videobuf_queue *q,
165 static inline void videobuf_queue_lock(struct videobuf_queue *q) in videobuf_queue_lock() argument
167 if (!q->ext_lock) in videobuf_queue_lock()
[all …]
Dvideobuf2-core.h347 int (*queue_setup)(struct vb2_queue *q, const void *parg,
351 void (*wait_prepare)(struct vb2_queue *q);
352 void (*wait_finish)(struct vb2_queue *q);
359 int (*start_streaming)(struct vb2_queue *q, unsigned int count);
360 void (*stop_streaming)(struct vb2_queue *q);
504 void vb2_discard_done(struct vb2_queue *q);
505 int vb2_wait_for_all_buffers(struct vb2_queue *q);
507 int vb2_core_querybuf(struct vb2_queue *q, unsigned int index, void *pb);
508 int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
510 int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
[all …]
Dvideobuf2-v4l2.h53 int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b);
54 int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req);
56 int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create);
57 int vb2_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b);
59 int vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b);
60 int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb);
61 int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking);
63 int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type);
64 int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type);
66 int __must_check vb2_queue_init(struct vb2_queue *q);
[all …]
/linux-4.4.14/kernel/sched/
Dwait.c14 void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *key) in __init_waitqueue_head() argument
16 spin_lock_init(&q->lock); in __init_waitqueue_head()
17 lockdep_set_class_and_name(&q->lock, key, name); in __init_waitqueue_head()
18 INIT_LIST_HEAD(&q->task_list); in __init_waitqueue_head()
23 void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait) in add_wait_queue() argument
28 spin_lock_irqsave(&q->lock, flags); in add_wait_queue()
29 __add_wait_queue(q, wait); in add_wait_queue()
30 spin_unlock_irqrestore(&q->lock, flags); in add_wait_queue()
34 void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait) in add_wait_queue_exclusive() argument
39 spin_lock_irqsave(&q->lock, flags); in add_wait_queue_exclusive()
[all …]
/linux-4.4.14/drivers/net/
Dmacvtap.c54 static inline bool macvtap_legacy_is_little_endian(struct macvtap_queue *q) in macvtap_legacy_is_little_endian() argument
56 return q->flags & MACVTAP_VNET_BE ? false : in macvtap_legacy_is_little_endian()
60 static long macvtap_get_vnet_be(struct macvtap_queue *q, int __user *sp) in macvtap_get_vnet_be() argument
62 int s = !!(q->flags & MACVTAP_VNET_BE); in macvtap_get_vnet_be()
70 static long macvtap_set_vnet_be(struct macvtap_queue *q, int __user *sp) in macvtap_set_vnet_be() argument
78 q->flags |= MACVTAP_VNET_BE; in macvtap_set_vnet_be()
80 q->flags &= ~MACVTAP_VNET_BE; in macvtap_set_vnet_be()
85 static inline bool macvtap_legacy_is_little_endian(struct macvtap_queue *q) in macvtap_legacy_is_little_endian() argument
90 static long macvtap_get_vnet_be(struct macvtap_queue *q, int __user *argp) in macvtap_get_vnet_be() argument
95 static long macvtap_set_vnet_be(struct macvtap_queue *q, int __user *argp) in macvtap_set_vnet_be() argument
[all …]
/linux-4.4.14/drivers/net/ethernet/mellanox/mlxsw/
Dpci.c165 struct mlxsw_pci_queue *q; member
196 static void mlxsw_pci_queue_tasklet_schedule(struct mlxsw_pci_queue *q) in mlxsw_pci_queue_tasklet_schedule() argument
198 tasklet_schedule(&q->tasklet); in mlxsw_pci_queue_tasklet_schedule()
201 static char *__mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q, in __mlxsw_pci_queue_elem_get() argument
204 return q->mem_item.buf + (elem_size * elem_index); in __mlxsw_pci_queue_elem_get()
208 mlxsw_pci_queue_elem_info_get(struct mlxsw_pci_queue *q, int elem_index) in mlxsw_pci_queue_elem_info_get() argument
210 return &q->elem_info[elem_index]; in mlxsw_pci_queue_elem_info_get()
214 mlxsw_pci_queue_elem_info_producer_get(struct mlxsw_pci_queue *q) in mlxsw_pci_queue_elem_info_producer_get() argument
216 int index = q->producer_counter & (q->count - 1); in mlxsw_pci_queue_elem_info_producer_get()
218 if ((q->producer_counter - q->consumer_counter) == q->count) in mlxsw_pci_queue_elem_info_producer_get()
[all …]
/linux-4.4.14/drivers/scsi/arm/
Dqueue.c42 #define SET_MAGIC(q,m) ((q)->magic = (m)) argument
43 #define BAD_MAGIC(q,m) ((q)->magic != (m)) argument
45 #define SET_MAGIC(q,m) do { } while (0) argument
46 #define BAD_MAGIC(q,m) (0) argument
61 QE_t *q; in queue_initialise() local
73 queue->alloc = q = kmalloc(sizeof(QE_t) * nqueues, GFP_KERNEL); in queue_initialise()
74 if (q) { in queue_initialise()
75 for (; nqueues; q++, nqueues--) { in queue_initialise()
76 SET_MAGIC(q, QUEUE_MAGIC_FREE); in queue_initialise()
77 q->SCpnt = NULL; in queue_initialise()
[all …]
/linux-4.4.14/lib/raid6/
Dsse2.c44 u8 *p, *q; in raid6_sse21_gen_syndrome() local
49 q = dptr[z0+2]; /* RS syndrome */ in raid6_sse21_gen_syndrome()
83 asm volatile("movntdq %%xmm4,%0" : "=m" (q[d])); in raid6_sse21_gen_syndrome()
96 u8 *p, *q; in raid6_sse21_xor_syndrome() local
101 q = dptr[disks-1]; /* RS syndrome */ in raid6_sse21_xor_syndrome()
130 asm volatile("pxor %0,%%xmm4" : : "m" (q[d])); in raid6_sse21_xor_syndrome()
132 asm volatile("movdqa %%xmm4,%0" : "=m" (q[d])); in raid6_sse21_xor_syndrome()
154 u8 *p, *q; in raid6_sse22_gen_syndrome() local
159 q = dptr[z0+2]; /* RS syndrome */ in raid6_sse22_gen_syndrome()
195 asm volatile("movntdq %%xmm4,%0" : "=m" (q[d])); in raid6_sse22_gen_syndrome()
[all …]
Drecov_avx2.c25 u8 *p, *q, *dp, *dq; in raid6_2data_recov_avx2() local
31 q = (u8 *)ptrs[disks-1]; in raid6_2data_recov_avx2()
49 ptrs[disks-1] = q; in raid6_2data_recov_avx2()
63 asm volatile("vmovdqa %0, %%ymm1" : : "m" (q[0])); in raid6_2data_recov_avx2()
64 asm volatile("vmovdqa %0, %%ymm9" : : "m" (q[32])); in raid6_2data_recov_avx2()
136 q += 64; in raid6_2data_recov_avx2()
140 asm volatile("vmovdqa %0, %%ymm1" : : "m" (*q)); in raid6_2data_recov_avx2()
183 q += 32; in raid6_2data_recov_avx2()
195 u8 *p, *q, *dq; in raid6_datap_recov_avx2() local
200 q = (u8 *)ptrs[disks-1]; in raid6_datap_recov_avx2()
[all …]
Drecov_ssse3.c25 u8 *p, *q, *dp, *dq; in raid6_2data_recov_ssse3() local
33 q = (u8 *)ptrs[disks-1]; in raid6_2data_recov_ssse3()
51 ptrs[disks-1] = q; in raid6_2data_recov_ssse3()
73 asm volatile("movdqa %0,%%xmm1" : : "m" (q[0])); in raid6_2data_recov_ssse3()
74 asm volatile("movdqa %0,%%xmm9" : : "m" (q[16])); in raid6_2data_recov_ssse3()
140 q += 32; in raid6_2data_recov_ssse3()
144 asm volatile("movdqa %0,%%xmm1" : : "m" (*q)); in raid6_2data_recov_ssse3()
187 q += 16; in raid6_2data_recov_ssse3()
200 u8 *p, *q, *dq; in raid6_datap_recov_ssse3() local
207 q = (u8 *)ptrs[disks-1]; in raid6_datap_recov_ssse3()
[all …]
Davx2.c45 u8 *p, *q; in raid6_avx21_gen_syndrome() local
50 q = dptr[z0+2]; /* RS syndrome */ in raid6_avx21_gen_syndrome()
82 asm volatile("vmovntdq %%ymm4,%0" : "=m" (q[d])); in raid6_avx21_gen_syndrome()
104 u8 *p, *q; in raid6_avx22_gen_syndrome() local
109 q = dptr[z0+2]; /* RS syndrome */ in raid6_avx22_gen_syndrome()
144 asm volatile("vmovntdq %%ymm4,%0" : "=m" (q[d])); in raid6_avx22_gen_syndrome()
145 asm volatile("vmovntdq %%ymm6,%0" : "=m" (q[d+32])); in raid6_avx22_gen_syndrome()
168 u8 *p, *q; in raid6_avx24_gen_syndrome() local
173 q = dptr[z0+2]; /* RS syndrome */ in raid6_avx24_gen_syndrome()
231 asm volatile("vmovntdq %%ymm4,%0" : "=m" (q[d])); in raid6_avx24_gen_syndrome()
[all …]
Drecov.c28 u8 *p, *q, *dp, *dq; in raid6_2data_recov_intx1() local
34 q = (u8 *)ptrs[disks-1]; in raid6_2data_recov_intx1()
52 ptrs[disks-1] = q; in raid6_2data_recov_intx1()
61 qx = qmul[*q ^ *dq]; in raid6_2data_recov_intx1()
64 p++; q++; in raid6_2data_recov_intx1()
72 u8 *p, *q, *dq; in raid6_datap_recov_intx1() local
76 q = (u8 *)ptrs[disks-1]; in raid6_datap_recov_intx1()
88 ptrs[disks-1] = q; in raid6_datap_recov_intx1()
95 *p++ ^= *dq = qmul[*q ^ *dq]; in raid6_datap_recov_intx1()
96 q++; dq++; in raid6_datap_recov_intx1()
Dmmx.c43 u8 *p, *q; in raid6_mmx1_gen_syndrome() local
48 q = dptr[z0+2]; /* RS syndrome */ in raid6_mmx1_gen_syndrome()
70 asm volatile("movq %%mm4,%0" : "=m" (q[d])); in raid6_mmx1_gen_syndrome()
91 u8 *p, *q; in raid6_mmx2_gen_syndrome() local
96 q = dptr[z0+2]; /* RS syndrome */ in raid6_mmx2_gen_syndrome()
129 asm volatile("movq %%mm4,%0" : "=m" (q[d])); in raid6_mmx2_gen_syndrome()
130 asm volatile("movq %%mm6,%0" : "=m" (q[d+8])); in raid6_mmx2_gen_syndrome()
Dsse1.c48 u8 *p, *q; in raid6_sse11_gen_syndrome() local
53 q = dptr[z0+2]; /* RS syndrome */ in raid6_sse11_gen_syndrome()
86 asm volatile("movntq %%mm4,%0" : "=m" (q[d])); in raid6_sse11_gen_syndrome()
107 u8 *p, *q; in raid6_sse12_gen_syndrome() local
112 q = dptr[z0+2]; /* RS syndrome */ in raid6_sse12_gen_syndrome()
148 asm volatile("movntq %%mm4,%0" : "=m" (q[d])); in raid6_sse12_gen_syndrome()
149 asm volatile("movntq %%mm6,%0" : "=m" (q[d+8])); in raid6_sse12_gen_syndrome()
/linux-4.4.14/net/sctp/
Doutqueue.c56 static void sctp_check_transmitted(struct sctp_outq *q,
63 static void sctp_mark_missing(struct sctp_outq *q,
69 static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 sack_ctsn);
71 static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout);
74 static inline void sctp_outq_head_data(struct sctp_outq *q, in sctp_outq_head_data() argument
77 list_add(&ch->list, &q->out_chunk_list); in sctp_outq_head_data()
78 q->out_qlen += ch->skb->len; in sctp_outq_head_data()
82 static inline struct sctp_chunk *sctp_outq_dequeue_data(struct sctp_outq *q) in sctp_outq_dequeue_data() argument
86 if (!list_empty(&q->out_chunk_list)) { in sctp_outq_dequeue_data()
87 struct list_head *entry = q->out_chunk_list.next; in sctp_outq_dequeue_data()
[all …]
/linux-4.4.14/drivers/net/wireless/brcm80211/brcmutil/
Dutils.c61 struct sk_buff_head *q; in brcmu_pktq_penq() local
66 q = &pq->q[prec].skblist; in brcmu_pktq_penq()
67 skb_queue_tail(q, p); in brcmu_pktq_penq()
80 struct sk_buff_head *q; in brcmu_pktq_penq_head() local
85 q = &pq->q[prec].skblist; in brcmu_pktq_penq_head()
86 skb_queue_head(q, p); in brcmu_pktq_penq_head()
98 struct sk_buff_head *q; in brcmu_pktq_pdeq() local
101 q = &pq->q[prec].skblist; in brcmu_pktq_pdeq()
102 p = skb_dequeue(q); in brcmu_pktq_pdeq()
121 struct sk_buff_head *q; in brcmu_pktq_pdeq_match() local
[all …]
/linux-4.4.14/drivers/net/ethernet/chelsio/cxgb/
Dsge.c480 struct cmdQ *q = &sge->cmdQ[0]; in sched_skb() local
481 clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status); in sched_skb()
482 if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) { in sched_skb()
483 set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status); in sched_skb()
505 static void free_freelQ_buffers(struct pci_dev *pdev, struct freelQ *q) in free_freelQ_buffers() argument
507 unsigned int cidx = q->cidx; in free_freelQ_buffers()
509 while (q->credits--) { in free_freelQ_buffers()
510 struct freelQ_ce *ce = &q->centries[cidx]; in free_freelQ_buffers()
517 if (++cidx == q->size) in free_freelQ_buffers()
537 struct freelQ *q = &sge->freelQ[i]; in free_rx_resources() local
[all …]
/linux-4.4.14/arch/powerpc/lib/
Dcode-patching.c348 unsigned int *p, *q; in test_translate_branch() local
361 q = p + 1; in test_translate_branch()
362 patch_instruction(q, translate_branch(q, p)); in test_translate_branch()
363 check(instr_is_branch_to_addr(q, addr)); in test_translate_branch()
369 q = buf + 0x2000000; in test_translate_branch()
370 patch_instruction(q, translate_branch(q, p)); in test_translate_branch()
372 check(instr_is_branch_to_addr(q, addr)); in test_translate_branch()
373 check(*q == 0x4a000000); in test_translate_branch()
379 q = buf + 4; in test_translate_branch()
380 patch_instruction(q, translate_branch(q, p)); in test_translate_branch()
[all …]
/linux-4.4.14/arch/x86/kernel/
Dpci-iommu_table.c12 struct iommu_table_entry *q) in find_dependents_of() argument
16 if (!q) in find_dependents_of()
20 if (p->detect == q->depend) in find_dependents_of()
30 struct iommu_table_entry *p, *q, tmp; in sort_iommu_table() local
34 q = find_dependents_of(start, finish, p); in sort_iommu_table()
38 if (q > p) { in sort_iommu_table()
40 memmove(p, q, sizeof(*p)); in sort_iommu_table()
41 *q = tmp; in sort_iommu_table()
52 struct iommu_table_entry *p, *q, *x; in check_iommu_entries() local
56 q = find_dependents_of(start, finish, p); in check_iommu_entries()
[all …]
/linux-4.4.14/drivers/scsi/csiostor/
Dcsio_wr.c190 struct csio_q *q, *flq; in csio_wr_alloc_q() local
233 q = wrm->q_arr[free_idx]; in csio_wr_alloc_q()
235 q->vstart = pci_zalloc_consistent(hw->pdev, qsz, &q->pstart); in csio_wr_alloc_q()
236 if (!q->vstart) { in csio_wr_alloc_q()
243 q->type = type; in csio_wr_alloc_q()
244 q->owner = owner; in csio_wr_alloc_q()
245 q->pidx = q->cidx = q->inc_idx = 0; in csio_wr_alloc_q()
246 q->size = qsz; in csio_wr_alloc_q()
247 q->wr_sz = wrsize; /* If using fixed size WRs */ in csio_wr_alloc_q()
253 q->un.iq.genbit = 1; in csio_wr_alloc_q()
[all …]
/linux-4.4.14/net/ipv4/
Dip_fragment.c71 struct inet_frag_queue q; member
115 static unsigned int ip4_hashfn(const struct inet_frag_queue *q) in ip4_hashfn() argument
119 ipq = container_of(q, struct ipq, q); in ip4_hashfn()
123 static bool ip4_frag_match(const struct inet_frag_queue *q, const void *a) in ip4_frag_match() argument
128 qp = container_of(q, struct ipq, q); in ip4_frag_match()
137 static void ip4_frag_init(struct inet_frag_queue *q, const void *a) in ip4_frag_init() argument
139 struct ipq *qp = container_of(q, struct ipq, q); in ip4_frag_init()
140 struct netns_ipv4 *ipv4 = container_of(q->net, struct netns_ipv4, in ip4_frag_init()
158 static void ip4_frag_free(struct inet_frag_queue *q) in ip4_frag_free() argument
162 qp = container_of(q, struct ipq, q); in ip4_frag_free()
[all …]
Dinet_fragment.c56 inet_frag_hashfn(const struct inet_frags *f, const struct inet_frag_queue *q) in inet_frag_hashfn() argument
58 return f->hashfn(q) & (INETFRAGS_HASHSZ - 1); in inet_frag_hashfn()
80 struct inet_frag_queue *q; in inet_frag_secret_rebuild() local
86 hlist_for_each_entry_safe(q, n, &hb->chain, list) { in inet_frag_secret_rebuild()
87 unsigned int hval = inet_frag_hashfn(f, q); in inet_frag_secret_rebuild()
92 hlist_del(&q->list); in inet_frag_secret_rebuild()
107 hlist_add_head(&q->list, &hb_dest->chain); in inet_frag_secret_rebuild()
120 static bool inet_fragq_should_evict(const struct inet_frag_queue *q) in inet_fragq_should_evict() argument
122 return q->net->low_thresh == 0 || in inet_fragq_should_evict()
123 frag_mem_limit(q->net) >= q->net->low_thresh; in inet_fragq_should_evict()
[all …]
/linux-4.4.14/arch/sh/lib64/
Dcopy_user_memcpy.S61 #define LDUAQ(P,O,D0,D1) ldlo.q P,O,D0; ldhi.q P,O+7,D1
62 #define STUAQ(P,O,D0,D1) stlo.q P,O,D0; sthi.q P,O+7,D1
108 stlo.q r2, 0, r0
110 sthi.q r5, -1, r6
111 stlo.q r5, -8, r6
136 ldlo.q r6, -8, r7
138 sthi.q r2, 7, r0
139 ldhi.q r6, -1, r6
146 sthi.q r2, 7, r0
148 sthi.q r2, 15, r8
[all …]
Dmemcpy.S46 #define LDUAQ(P,O,D0,D1) ldlo.q P,O,D0; ldhi.q P,O+7,D1
47 #define STUAQ(P,O,D0,D1) stlo.q P,O,D0; sthi.q P,O+7,D1
93 stlo.q r2, 0, r0
95 sthi.q r5, -1, r6
96 stlo.q r5, -8, r6
121 ldlo.q r6, -8, r7
123 sthi.q r2, 7, r0
124 ldhi.q r6, -1, r6
131 sthi.q r2, 7, r0
133 sthi.q r2, 15, r8
[all …]
Dcopy_page.S44 ld.q r3, 0x00, r63
45 ld.q r3, 0x20, r63
46 ld.q r3, 0x40, r63
47 ld.q r3, 0x60, r63
71 ldx.q r2, r22, r63 ! prefetch 4 lines hence
78 ldx.q r2, r60, r36
79 ldx.q r2, r61, r37
80 ldx.q r2, r62, r38
81 ldx.q r2, r23, r39
82 st.q r2, 0, r36
[all …]
Dstrcpy.S25 ldlo.q r3,0,r4
37 ldx.q r0, r21, r5
46 ldlo.q r2, 0, r9
50 stlo.q r2, 0, r9
59 stlo.q r2, 0, r4
61 sthi.q r0, -1, r4
81 stlo.q r0, 0, r5
82 ldx.q r0, r20, r4
84 sthi.q r0, -9, r5
87 ldx.q r0, r21, r5
[all …]
/linux-4.4.14/net/ieee802154/6lowpan/
Dreassembly.c51 static unsigned int lowpan_hashfn(const struct inet_frag_queue *q) in lowpan_hashfn() argument
55 fq = container_of(q, struct lowpan_frag_queue, q); in lowpan_hashfn()
59 static bool lowpan_frag_match(const struct inet_frag_queue *q, const void *a) in lowpan_frag_match() argument
64 fq = container_of(q, struct lowpan_frag_queue, q); in lowpan_frag_match()
70 static void lowpan_frag_init(struct inet_frag_queue *q, const void *a) in lowpan_frag_init() argument
75 fq = container_of(q, struct lowpan_frag_queue, q); in lowpan_frag_init()
88 fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q); in lowpan_frag_expire()
89 net = container_of(fq->q.net, struct net, ieee802154_lowpan.frags); in lowpan_frag_expire()
91 spin_lock(&fq->q.lock); in lowpan_frag_expire()
93 if (fq->q.flags & INET_FRAG_COMPLETE) in lowpan_frag_expire()
[all …]
/linux-4.4.14/drivers/net/wireless/mediatek/mt7601u/
Ddma.c172 struct mt7601u_rx_queue *q = &dev->rx_q; in mt7601u_rx_get_pending_entry() local
178 if (!q->pending) in mt7601u_rx_get_pending_entry()
181 buf = &q->e[q->start]; in mt7601u_rx_get_pending_entry()
182 q->pending--; in mt7601u_rx_get_pending_entry()
183 q->start = (q->start + 1) % q->entries; in mt7601u_rx_get_pending_entry()
193 struct mt7601u_rx_queue *q = &dev->rx_q; in mt7601u_complete_rx() local
200 if (WARN_ONCE(q->e[q->end].urb != urb, "RX urb mismatch")) in mt7601u_complete_rx()
203 q->end = (q->end + 1) % q->entries; in mt7601u_complete_rx()
204 q->pending++; in mt7601u_complete_rx()
226 struct mt7601u_tx_queue *q = urb->context; in mt7601u_complete_tx() local
[all …]
/linux-4.4.14/net/ipv6/
Dreassembly.c94 static unsigned int ip6_hashfn(const struct inet_frag_queue *q) in ip6_hashfn() argument
98 fq = container_of(q, struct frag_queue, q); in ip6_hashfn()
102 bool ip6_frag_match(const struct inet_frag_queue *q, const void *a) in ip6_frag_match() argument
107 fq = container_of(q, struct frag_queue, q); in ip6_frag_match()
118 void ip6_frag_init(struct inet_frag_queue *q, const void *a) in ip6_frag_init() argument
120 struct frag_queue *fq = container_of(q, struct frag_queue, q); in ip6_frag_init()
136 spin_lock(&fq->q.lock); in ip6_expire_frag_queue()
138 if (fq->q.flags & INET_FRAG_COMPLETE) in ip6_expire_frag_queue()
141 inet_frag_kill(&fq->q, frags); in ip6_expire_frag_queue()
150 if (inet_frag_evicting(&fq->q)) in ip6_expire_frag_queue()
[all …]
/linux-4.4.14/drivers/net/ethernet/renesas/
Dravb_main.c175 static void ravb_ring_free(struct net_device *ndev, int q) in ravb_ring_free() argument
182 if (priv->rx_skb[q]) { in ravb_ring_free()
183 for (i = 0; i < priv->num_rx_ring[q]; i++) in ravb_ring_free()
184 dev_kfree_skb(priv->rx_skb[q][i]); in ravb_ring_free()
186 kfree(priv->rx_skb[q]); in ravb_ring_free()
187 priv->rx_skb[q] = NULL; in ravb_ring_free()
190 if (priv->tx_skb[q]) { in ravb_ring_free()
191 for (i = 0; i < priv->num_tx_ring[q]; i++) in ravb_ring_free()
192 dev_kfree_skb(priv->tx_skb[q][i]); in ravb_ring_free()
194 kfree(priv->tx_skb[q]); in ravb_ring_free()
[all …]
/linux-4.4.14/drivers/net/wireless/iwlwifi/pcie/
Dtx.c68 static int iwl_queue_space(const struct iwl_queue *q) in iwl_queue_space() argument
79 if (q->n_window < TFD_QUEUE_SIZE_MAX) in iwl_queue_space()
80 max = q->n_window; in iwl_queue_space()
88 used = (q->write_ptr - q->read_ptr) & (TFD_QUEUE_SIZE_MAX - 1); in iwl_queue_space()
99 static int iwl_queue_init(struct iwl_queue *q, int slots_num, u32 id) in iwl_queue_init() argument
101 q->n_window = slots_num; in iwl_queue_init()
102 q->id = id; in iwl_queue_init()
109 q->low_mark = q->n_window / 4; in iwl_queue_init()
110 if (q->low_mark < 4) in iwl_queue_init()
111 q->low_mark = 4; in iwl_queue_init()
[all …]
Dinternal.h268 struct iwl_queue q; member
495 if (test_and_clear_bit(txq->q.id, trans_pcie->queue_stopped)) { in iwl_wake_queue()
496 IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->q.id); in iwl_wake_queue()
497 iwl_op_mode_queue_not_full(trans->op_mode, txq->q.id); in iwl_wake_queue()
506 if (!test_and_set_bit(txq->q.id, trans_pcie->queue_stopped)) { in iwl_stop_queue()
507 iwl_op_mode_queue_full(trans->op_mode, txq->q.id); in iwl_stop_queue()
508 IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->q.id); in iwl_stop_queue()
511 txq->q.id); in iwl_stop_queue()
514 static inline bool iwl_queue_used(const struct iwl_queue *q, int i) in iwl_queue_used() argument
516 return q->write_ptr >= q->read_ptr ? in iwl_queue_used()
[all …]
/linux-4.4.14/net/netfilter/
Dxt_quota.c28 struct xt_quota_info *q = (void *)par->matchinfo; in quota_mt() local
29 struct xt_quota_priv *priv = q->master; in quota_mt()
30 bool ret = q->flags & XT_QUOTA_INVERT; in quota_mt()
47 struct xt_quota_info *q = par->matchinfo; in quota_mt_check() local
49 if (q->flags & ~XT_QUOTA_MASK) in quota_mt_check()
52 q->master = kmalloc(sizeof(*q->master), GFP_KERNEL); in quota_mt_check()
53 if (q->master == NULL) in quota_mt_check()
56 spin_lock_init(&q->master->lock); in quota_mt_check()
57 q->master->quota = q->quota; in quota_mt_check()
63 const struct xt_quota_info *q = par->matchinfo; in quota_mt_destroy() local
[all …]
Dnfnetlink_queue.c99 instance_lookup(struct nfnl_queue_net *q, u_int16_t queue_num) in instance_lookup() argument
104 head = &q->instance_table[instance_hashfn(queue_num)]; in instance_lookup()
113 instance_create(struct nfnl_queue_net *q, u_int16_t queue_num, u32 portid) in instance_create() argument
119 spin_lock(&q->instances_lock); in instance_create()
120 if (instance_lookup(q, queue_num)) { in instance_create()
145 hlist_add_head_rcu(&inst->hlist, &q->instance_table[h]); in instance_create()
147 spin_unlock(&q->instances_lock); in instance_create()
154 spin_unlock(&q->instances_lock); in instance_create()
180 instance_destroy(struct nfnl_queue_net *q, struct nfqnl_instance *inst) in instance_destroy() argument
182 spin_lock(&q->instances_lock); in instance_destroy()
[all …]
/linux-4.4.14/include/trace/events/
Dvb2.h11 TP_PROTO(struct vb2_queue *q, struct vb2_buffer *vb),
12 TP_ARGS(q, vb),
24 __entry->owner = q->owner;
25 __entry->queued_count = q->queued_count;
27 atomic_read(&q->owned_by_drv_count);
43 TP_PROTO(struct vb2_queue *q, struct vb2_buffer *vb),
44 TP_ARGS(q, vb)
48 TP_PROTO(struct vb2_queue *q, struct vb2_buffer *vb),
49 TP_ARGS(q, vb)
53 TP_PROTO(struct vb2_queue *q, struct vb2_buffer *vb),
[all …]
Dblock.h66 TP_PROTO(struct request_queue *q, struct request *rq),
68 TP_ARGS(q, rq),
110 TP_PROTO(struct request_queue *q, struct request *rq),
112 TP_ARGS(q, rq)
126 TP_PROTO(struct request_queue *q, struct request *rq),
128 TP_ARGS(q, rq)
145 TP_PROTO(struct request_queue *q, struct request *rq,
148 TP_ARGS(q, rq, nr_bytes),
178 TP_PROTO(struct request_queue *q, struct request *rq),
180 TP_ARGS(q, rq),
[all …]
/linux-4.4.14/arch/alpha/include/asm/
Dcore_wildfire.h226 #define WILDFIRE_QBB(q) ((~((long)(q)) & WILDFIRE_QBB_MASK) << 36) argument
229 #define WILDFIRE_QBB_IO(q) (WILDFIRE_BASE | WILDFIRE_QBB(q)) argument
230 #define WILDFIRE_QBB_HOSE(q,h) (WILDFIRE_QBB_IO(q) | WILDFIRE_HOSE(h)) argument
232 #define WILDFIRE_MEM(q,h) (WILDFIRE_QBB_HOSE(q,h) | 0x000000000UL) argument
233 #define WILDFIRE_CONF(q,h) (WILDFIRE_QBB_HOSE(q,h) | 0x1FE000000UL) argument
234 #define WILDFIRE_IO(q,h) (WILDFIRE_QBB_HOSE(q,h) | 0x1FF000000UL) argument
236 #define WILDFIRE_qsd(q) \ argument
237 ((wildfire_qsd *)(WILDFIRE_QBB_IO(q)|WILDFIRE_QSD_ENTITY_SLOW|(((1UL<<13)-1)<<23)))
242 #define WILDFIRE_qsa(q) \ argument
243 ((wildfire_qsa *)(WILDFIRE_QBB_IO(q)|WILDFIRE_QSA_ENTITY|(((1UL<<13)-1)<<23)))
[all …]
/linux-4.4.14/drivers/net/wireless/ath/ath9k/
Dmac.c46 u32 ath9k_hw_gettxbuf(struct ath_hw *ah, u32 q) in ath9k_hw_gettxbuf() argument
48 return REG_READ(ah, AR_QTXDP(q)); in ath9k_hw_gettxbuf()
52 void ath9k_hw_puttxbuf(struct ath_hw *ah, u32 q, u32 txdp) in ath9k_hw_puttxbuf() argument
54 REG_WRITE(ah, AR_QTXDP(q), txdp); in ath9k_hw_puttxbuf()
58 void ath9k_hw_txstart(struct ath_hw *ah, u32 q) in ath9k_hw_txstart() argument
60 ath_dbg(ath9k_hw_common(ah), QUEUE, "Enable TXE on queue: %u\n", q); in ath9k_hw_txstart()
61 REG_WRITE(ah, AR_Q_TXE, 1 << q); in ath9k_hw_txstart()
65 u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q) in ath9k_hw_numtxpending() argument
69 npend = REG_READ(ah, AR_QSTS(q)) & AR_Q_STS_PEND_FR_CNT; in ath9k_hw_numtxpending()
72 if (REG_READ(ah, AR_Q_TXE) & (1 << q)) in ath9k_hw_numtxpending()
[all …]
/linux-4.4.14/drivers/iommu/
Darm-smmu-v3.c189 #define Q_IDX(q, p) ((p) & ((1 << (q)->max_n_shift) - 1)) argument
190 #define Q_WRP(q, p) ((p) & (1 << (q)->max_n_shift)) argument
192 #define Q_OVF(q, p) ((p) & Q_OVERFLOW_FLAG) argument
193 #define Q_ENT(q, p) ((q)->base + \ argument
194 Q_IDX(q, p) * (q)->ent_dwords)
499 struct arm_smmu_queue q; member
504 struct arm_smmu_queue q; member
509 struct arm_smmu_queue q; member
664 static bool queue_full(struct arm_smmu_queue *q) in queue_full() argument
666 return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) && in queue_full()
[all …]
/linux-4.4.14/drivers/tty/vt/
Dconsolemap.c190 unsigned char *q; in set_inverse_transl() local
193 q = p->inverse_translations[i]; in set_inverse_transl()
195 if (!q) { in set_inverse_transl()
196 q = p->inverse_translations[i] = kmalloc(MAX_GLYPH, GFP_KERNEL); in set_inverse_transl()
197 if (!q) return; in set_inverse_transl()
199 memset(q, 0, MAX_GLYPH); in set_inverse_transl()
203 if (glyph >= 0 && glyph < MAX_GLYPH && q[glyph] < 32) { in set_inverse_transl()
205 q[glyph] = j; in set_inverse_transl()
215 u16 *q; in set_inverse_trans_unicode() local
218 q = p->inverse_trans_unicode; in set_inverse_trans_unicode()
[all …]
/linux-4.4.14/drivers/scsi/be2iscsi/
Dbe.h59 static inline void *queue_head_node(struct be_queue_info *q) in queue_head_node() argument
61 return q->dma_mem.va + q->head * q->entry_size; in queue_head_node()
64 static inline void *queue_get_wrb(struct be_queue_info *q, unsigned int wrb_num) in queue_get_wrb() argument
66 return q->dma_mem.va + wrb_num * q->entry_size; in queue_get_wrb()
69 static inline void *queue_tail_node(struct be_queue_info *q) in queue_tail_node() argument
71 return q->dma_mem.va + q->tail * q->entry_size; in queue_tail_node()
74 static inline void queue_head_inc(struct be_queue_info *q) in queue_head_inc() argument
76 index_inc(&q->head, q->len); in queue_head_inc()
79 static inline void queue_tail_inc(struct be_queue_info *q) in queue_tail_inc() argument
81 index_inc(&q->tail, q->len); in queue_tail_inc()
[all …]
/linux-4.4.14/lib/
Dts_kmp.c49 unsigned int i, q = 0, text_len, consumed = state->offset; in kmp_find() local
60 while (q > 0 && kmp->pattern[q] in kmp_find()
62 q = kmp->prefix_tbl[q - 1]; in kmp_find()
63 if (kmp->pattern[q] in kmp_find()
65 q++; in kmp_find()
66 if (unlikely(q == kmp->pattern_len)) { in kmp_find()
81 unsigned int k, q; in compute_prefix_tbl() local
84 for (k = 0, q = 1; q < len; q++) { in compute_prefix_tbl()
86 != (icase ? toupper(pattern[q]) : pattern[q])) in compute_prefix_tbl()
89 == (icase ? toupper(pattern[q]) : pattern[q])) in compute_prefix_tbl()
[all …]
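The lib/ts_kmp.c hits above are the core of the kernel's Knuth-Morris-Pratt text search. As a point of reference, here is a minimal user-space sketch of the same prefix-table construction and scan; it assumes plain C strings and case-sensitive matching, unlike the kernel version, which sits behind the textsearch API and supports case folding.

    #include <stdio.h>
    #include <string.h>

    /* Build the KMP failure table: tbl[q] = length of the longest proper
     * prefix of pat[0..q] that is also a suffix of it. */
    static void compute_prefix_tbl(const char *pat, unsigned int len,
                                   unsigned int *tbl)
    {
        unsigned int k, q;

        tbl[0] = 0;
        for (k = 0, q = 1; q < len; q++) {
            while (k > 0 && pat[k] != pat[q])
                k = tbl[k - 1];
            if (pat[k] == pat[q])
                k++;
            tbl[q] = k;
        }
    }

    /* Scan the text once, falling back through the table on mismatches. */
    static int kmp_find(const char *text, unsigned int text_len,
                        const char *pat, unsigned int pat_len,
                        const unsigned int *tbl)
    {
        unsigned int i, q = 0;

        for (i = 0; i < text_len; i++) {
            while (q > 0 && pat[q] != text[i])
                q = tbl[q - 1];
            if (pat[q] == text[i])
                q++;
            if (q == pat_len)
                return (int)(i + 1 - pat_len);   /* offset of the match */
        }
        return -1;
    }

    int main(void)
    {
        const char *text = "abxabcabcaby", *pat = "abcaby";
        unsigned int tbl[16];

        compute_prefix_tbl(pat, strlen(pat), tbl);
        printf("match at %d\n",
               kmp_find(text, strlen(text), pat, strlen(pat), tbl));
        return 0;   /* prints "match at 6" */
    }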
Dstring_helpers.c130 char *p = *dst, *q = *src; in unescape_space() local
132 switch (*q) { in unescape_space()
158 char *p = *dst, *q = *src; in unescape_octal() local
161 if (isodigit(*q) == 0) in unescape_octal()
164 num = (*q++) & 7; in unescape_octal()
165 while (num < 32 && isodigit(*q) && (q - *src < 3)) { in unescape_octal()
167 num += (*q++) & 7; in unescape_octal()
171 *src = q; in unescape_octal()
177 char *p = *dst, *q = *src; in unescape_hex() local
181 if (*q++ != 'x') in unescape_hex()
[all …]
Dcordic.c64 coord.q = 0; in cordic_calc_iq()
82 valtmp = coord.i - (coord.q >> iter); in cordic_calc_iq()
83 coord.q += (coord.i >> iter); in cordic_calc_iq()
86 valtmp = coord.i + (coord.q >> iter); in cordic_calc_iq()
87 coord.q -= (coord.i >> iter); in cordic_calc_iq()
94 coord.q *= signx; in cordic_calc_iq()
Dcrc32.c61 # define DO_CRC4 (t3[(q) & 255] ^ t2[(q >> 8) & 255] ^ \
62 t1[(q >> 16) & 255] ^ t0[(q >> 24) & 255])
63 # define DO_CRC8 (t7[(q) & 255] ^ t6[(q >> 8) & 255] ^ \
64 t5[(q >> 16) & 255] ^ t4[(q >> 24) & 255])
67 # define DO_CRC4 (t0[(q) & 255] ^ t1[(q >> 8) & 255] ^ \
68 t2[(q >> 16) & 255] ^ t3[(q >> 24) & 255])
69 # define DO_CRC8 (t4[(q) & 255] ^ t5[(q >> 8) & 255] ^ \
70 t6[(q >> 16) & 255] ^ t7[(q >> 24) & 255])
81 u32 q; local
105 q = crc ^ *++b; /* use pre increment for speed */
[all …]
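The DO_CRC4/DO_CRC8 macros above are the slicing-by-4/8 table lookups lib/crc32.c uses to consume a 32-bit word of input per step. A bit-at-a-time user-space reference of the same reflected CRC-32 polynomial makes the underlying arithmetic easier to see; note that the kernel's crc32_le() leaves seeding and final inversion to its callers, so this sketch folds them in to produce the familiar check value.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Reflected CRC-32 (polynomial 0xEDB88320), one bit at a time.
     * The kernel's table-driven version computes the same remainder,
     * just four or eight bytes per lookup round. */
    static uint32_t crc32_bitwise(const uint8_t *p, size_t len)
    {
        uint32_t crc = 0xFFFFFFFFu;

        while (len--) {
            crc ^= *p++;
            for (int i = 0; i < 8; i++)
                crc = (crc >> 1) ^ ((crc & 1) ? 0xEDB88320u : 0);
        }
        return ~crc;
    }

    int main(void)
    {
        const char *s = "123456789";

        /* The standard check value for this CRC is 0xcbf43926. */
        printf("%08x\n", crc32_bitwise((const uint8_t *)s, strlen(s)));
        return 0;
    }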
/linux-4.4.14/drivers/net/fddi/skfp/
Dqueue.c36 smc->q.ev_put = smc->q.ev_get = smc->q.ev_queue ; in ev_init()
45 smc->q.ev_put->class = class ; in queue_event()
46 smc->q.ev_put->event = event ; in queue_event()
47 if (++smc->q.ev_put == &smc->q.ev_queue[MAX_EVENT]) in queue_event()
48 smc->q.ev_put = smc->q.ev_queue ; in queue_event()
50 if (smc->q.ev_put == smc->q.ev_get) { in queue_event()
78 ev = smc->q.ev_get ; in ev_dispatcher()
79 PRINTF("dispatch get %x put %x\n",ev,smc->q.ev_put) ; in ev_dispatcher()
80 while (ev != smc->q.ev_put) { in ev_dispatcher()
112 if (++ev == &smc->q.ev_queue[MAX_EVENT]) in ev_dispatcher()
[all …]
/linux-4.4.14/net/ipv6/netfilter/
Dnf_conntrack_reasm.c165 static unsigned int nf_hashfn(const struct inet_frag_queue *q) in nf_hashfn() argument
169 nq = container_of(q, struct frag_queue, q); in nf_hashfn()
184 fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q); in nf_ct_frag6_expire()
185 net = container_of(fq->q.net, struct net, nf_frag.frags); in nf_ct_frag6_expire()
195 struct inet_frag_queue *q; in fq_find() local
209 q = inet_frag_find(&net->nf_frag.frags, &nf_frags, &arg, hash); in fq_find()
211 if (IS_ERR_OR_NULL(q)) { in fq_find()
212 inet_frag_maybe_warn_overflow(q, pr_fmt()); in fq_find()
215 return container_of(q, struct frag_queue, q); in fq_find()
227 if (fq->q.flags & INET_FRAG_COMPLETE) { in nf_ct_frag6_queue()
[all …]
/linux-4.4.14/drivers/md/
Ddm-cache-policy-smq.c254 static void q_init(struct queue *q, struct entry_space *es, unsigned nr_levels) in q_init() argument
258 q->es = es; in q_init()
259 q->nr_elts = 0; in q_init()
260 q->nr_levels = nr_levels; in q_init()
262 for (i = 0; i < q->nr_levels; i++) { in q_init()
263 l_init(q->qs + i); in q_init()
264 q->target_count[i] = 0u; in q_init()
267 q->last_target_nr_elts = 0u; in q_init()
268 q->nr_top_levels = 0u; in q_init()
269 q->nr_in_top_levels = 0u; in q_init()
[all …]
Ddm-cache-policy-mq.c140 static void queue_init(struct queue *q) in queue_init() argument
144 q->nr_elts = 0; in queue_init()
145 q->current_writeback_sentinels = false; in queue_init()
146 q->next_writeback = 0; in queue_init()
148 INIT_LIST_HEAD(q->qs + i); in queue_init()
149 INIT_LIST_HEAD(q->sentinels + i); in queue_init()
150 INIT_LIST_HEAD(q->sentinels + NR_QUEUE_LEVELS + i); in queue_init()
151 INIT_LIST_HEAD(q->sentinels + (2 * NR_QUEUE_LEVELS) + i); in queue_init()
155 static unsigned queue_size(struct queue *q) in queue_size() argument
157 return q->nr_elts; in queue_size()
[all …]
Ddm-table.c280 struct request_queue *q; in device_area_is_invalid() local
294 q = bdev_get_queue(bdev); in device_area_is_invalid()
295 if (!q || !q->make_request_fn) { in device_area_is_invalid()
436 struct request_queue *q = bdev_get_queue(bdev); in dm_set_device_limits() local
439 if (unlikely(!q)) { in dm_set_device_limits()
450 q->limits.physical_block_size, in dm_set_device_limits()
451 q->limits.logical_block_size, in dm_set_device_limits()
452 q->limits.alignment_offset, in dm_set_device_limits()
893 struct request_queue *q = bdev_get_queue(dd->dm_dev->bdev); in dm_table_set_type() local
895 if (!blk_queue_stackable(q)) { in dm_table_set_type()
[all …]
/linux-4.4.14/drivers/net/ethernet/intel/fm10k/
Dfm10k_common.c341 struct fm10k_hw_stats_q *q, in fm10k_update_hw_stats_tx_q() argument
353 &q->tx_packets); in fm10k_update_hw_stats_tx_q()
358 &q->tx_bytes); in fm10k_update_hw_stats_tx_q()
370 if (q->tx_stats_idx == id_tx) { in fm10k_update_hw_stats_tx_q()
371 q->tx_packets.count += tx_packets; in fm10k_update_hw_stats_tx_q()
372 q->tx_bytes.count += tx_bytes; in fm10k_update_hw_stats_tx_q()
376 fm10k_update_hw_base_32b(&q->tx_packets, tx_packets); in fm10k_update_hw_stats_tx_q()
377 fm10k_update_hw_base_48b(&q->tx_bytes, tx_bytes); in fm10k_update_hw_stats_tx_q()
379 q->tx_stats_idx = id_tx; in fm10k_update_hw_stats_tx_q()
392 struct fm10k_hw_stats_q *q, in fm10k_update_hw_stats_rx_q() argument
[all …]
/linux-4.4.14/kernel/
Dlatencytop.c105 int q, same = 1; in account_global_scheduler_latency() local
113 for (q = 0; q < LT_BACKTRACEDEPTH; q++) { in account_global_scheduler_latency()
114 unsigned long record = lat->backtrace[q]; in account_global_scheduler_latency()
116 if (latency_record[i].backtrace[q] != record) { in account_global_scheduler_latency()
176 int i, q; in __account_scheduler_latency() local
203 for (q = 0; q < LT_BACKTRACEDEPTH; q++) { in __account_scheduler_latency()
204 unsigned long record = lat.backtrace[q]; in __account_scheduler_latency()
206 if (mylat->backtrace[q] != record) { in __account_scheduler_latency()
248 int q; in lstats_show() local
251 for (q = 0; q < LT_BACKTRACEDEPTH; q++) { in lstats_show()
[all …]
Dfutex.c1164 static void __unqueue_futex(struct futex_q *q) in __unqueue_futex() argument
1168 if (WARN_ON_SMP(!q->lock_ptr || !spin_is_locked(q->lock_ptr)) in __unqueue_futex()
1169 || WARN_ON(plist_node_empty(&q->list))) in __unqueue_futex()
1172 hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock); in __unqueue_futex()
1173 plist_del(&q->list, &hb->chain); in __unqueue_futex()
1183 static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q) in mark_wake_futex() argument
1185 struct task_struct *p = q->task; in mark_wake_futex()
1187 if (WARN(q->pi_state || q->rt_waiter, "refusing to wake PI futex\n")) in mark_wake_futex()
1195 __unqueue_futex(q); in mark_wake_futex()
1203 q->lock_ptr = NULL; in mark_wake_futex()
[all …]
/linux-4.4.14/mm/
Dquicklist.c50 static long min_pages_to_free(struct quicklist *q, in min_pages_to_free() argument
55 pages_to_free = q->nr_pages - max_pages(min_pages); in min_pages_to_free()
67 struct quicklist *q; in quicklist_trim() local
69 q = &get_cpu_var(quicklist)[nr]; in quicklist_trim()
70 if (q->nr_pages > min_pages) { in quicklist_trim()
71 pages_to_free = min_pages_to_free(q, min_pages, max_free); in quicklist_trim()
93 struct quicklist *ql, *q; in quicklist_total_size() local
97 for (q = ql; q < ql + CONFIG_NR_QUICK; q++) in quicklist_total_size()
98 count += q->nr_pages; in quicklist_total_size()
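The quicklist hits above (include/linux/quicklist.h and mm/quicklist.c) show the trick of keeping freed page-sized blocks on a per-CPU stack whose "next" link is stored in the first word of the free block itself. A user-space sketch of that pattern, with hypothetical helper names and an ordinary calloc() fallback standing in for the page allocator:

    #include <stdlib.h>

    #define BLOCK_SIZE 4096

    struct quicklist {
        void *page;        /* head of the intrusive free stack */
        long nr_pages;
    };

    static void *ql_alloc(struct quicklist *q)
    {
        void **p = q->page;

        if (p) {                        /* pop: the next link lives in the block */
            q->page = p[0];
            q->nr_pages--;
            p[0] = NULL;
            return p;
        }
        return calloc(1, BLOCK_SIZE);   /* fall back to the real allocator */
    }

    static void ql_free(struct quicklist *q, void *block)
    {
        *(void **)block = q->page;      /* push: store the link in the block */
        q->page = block;
        q->nr_pages++;
    }

    int main(void)
    {
        struct quicklist ql = { 0 };
        void *a = ql_alloc(&ql);

        ql_free(&ql, a);
        return ql_alloc(&ql) == a ? 0 : 1;   /* the freed block is reused first */
    }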
/linux-4.4.14/drivers/media/common/saa7146/
Dsaa7146_fops.c51 void saa7146_dma_free(struct saa7146_dev *dev,struct videobuf_queue *q, in saa7146_dma_free() argument
59 videobuf_waiton(q, &buf->vb, 0, 0); in saa7146_dma_free()
60 videobuf_dma_unmap(q->dev, dma); in saa7146_dma_free()
70 struct saa7146_dmaqueue *q, in saa7146_buffer_queue() argument
74 DEB_EE("dev:%p, dmaq:%p, buf:%p\n", dev, q, buf); in saa7146_buffer_queue()
76 BUG_ON(!q); in saa7146_buffer_queue()
78 if (NULL == q->curr) { in saa7146_buffer_queue()
79 q->curr = buf; in saa7146_buffer_queue()
83 list_add_tail(&buf->vb.queue,&q->queue); in saa7146_buffer_queue()
92 struct saa7146_dmaqueue *q, in saa7146_buffer_finish() argument
[all …]
/linux-4.4.14/drivers/scsi/aacraid/
Ddpcsup.c54 unsigned int aac_response_normal(struct aac_queue * q) in aac_response_normal() argument
56 struct aac_dev * dev = q->dev; in aac_response_normal()
63 spin_lock_irqsave(q->lock, flags); in aac_response_normal()
70 while(aac_consumer_get(dev, q, &entry)) in aac_response_normal()
78 aac_consumer_free(dev, q, HostNormRespQueue); in aac_response_normal()
90 spin_unlock_irqrestore(q->lock, flags); in aac_response_normal()
93 spin_lock_irqsave(q->lock, flags); in aac_response_normal()
96 spin_unlock_irqrestore(q->lock, flags); in aac_response_normal()
150 spin_lock_irqsave(q->lock, flags); in aac_response_normal()
158 spin_unlock_irqrestore(q->lock, flags); in aac_response_normal()
[all …]
/linux-4.4.14/drivers/firewire/
Dcore-topology.c37 #define SELF_ID_PHY_ID(q) (((q) >> 24) & 0x3f) argument
38 #define SELF_ID_EXTENDED(q) (((q) >> 23) & 0x01) argument
39 #define SELF_ID_LINK_ON(q) (((q) >> 22) & 0x01) argument
40 #define SELF_ID_GAP_COUNT(q) (((q) >> 16) & 0x3f) argument
41 #define SELF_ID_PHY_SPEED(q) (((q) >> 14) & 0x03) argument
42 #define SELF_ID_CONTENDER(q) (((q) >> 11) & 0x01) argument
43 #define SELF_ID_PHY_INITIATOR(q) (((q) >> 1) & 0x01) argument
44 #define SELF_ID_MORE_PACKETS(q) (((q) >> 0) & 0x01) argument
46 #define SELF_ID_EXT_SEQUENCE(q) (((q) >> 20) & 0x07) argument
55 u32 q; in count_ports() local
[all …]
/linux-4.4.14/kernel/trace/
Dblktrace.c310 int blk_trace_remove(struct request_queue *q) in blk_trace_remove() argument
314 bt = xchg(&q->blk_trace, NULL); in blk_trace_remove()
436 int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev, in do_blk_trace_setup() argument
522 if (cmpxchg(&q->blk_trace, NULL, bt)) in do_blk_trace_setup()
534 int blk_trace_setup(struct request_queue *q, char *name, dev_t dev, in blk_trace_setup() argument
545 ret = do_blk_trace_setup(q, name, dev, bdev, &buts); in blk_trace_setup()
550 blk_trace_remove(q); in blk_trace_setup()
558 static int compat_blk_trace_setup(struct request_queue *q, char *name, in compat_blk_trace_setup() argument
578 ret = do_blk_trace_setup(q, name, dev, bdev, &buts); in compat_blk_trace_setup()
583 blk_trace_remove(q); in compat_blk_trace_setup()
[all …]
/linux-4.4.14/lib/mpi/
Dmpih-div.c117 mpi_limb_t q; in mpihelp_divrem() local
129 q = ~(mpi_limb_t) 0; in mpihelp_divrem()
135 qp[i] = q; in mpihelp_divrem()
141 udiv_qrnnd(q, r, n1, n0, d1); in mpihelp_divrem()
142 umul_ppmm(n1, n0, d0, q); in mpihelp_divrem()
149 q--; in mpihelp_divrem()
156 qp[i] = q; in mpihelp_divrem()
184 mpi_limb_t q; in mpihelp_divrem() local
200 q = ~(mpi_limb_t) 0; in mpihelp_divrem()
204 udiv_qrnnd(q, r, n0, np[dsize - 1], dX); in mpihelp_divrem()
[all …]
/linux-4.4.14/fs/jffs2/
Dcompr_rubin.c39 unsigned long q; member
92 rs->q = 0; in init_rubin()
108 while ((rs->q >= UPPER_BIT_RUBIN) || in encode()
109 ((rs->p + rs->q) <= UPPER_BIT_RUBIN)) { in encode()
112 ret = pushbit(&rs->pp, (rs->q & UPPER_BIT_RUBIN) ? 1 : 0, 0); in encode()
115 rs->q &= LOWER_BITS_RUBIN; in encode()
116 rs->q <<= 1; in encode()
132 rs->q += i0; in encode()
144 pushbit(&rs->pp, (UPPER_BIT_RUBIN & rs->q) ? 1 : 0, 1); in end_rubin()
145 rs->q &= LOWER_BITS_RUBIN; in end_rubin()
[all …]
/linux-4.4.14/drivers/net/ethernet/cavium/thunder/
Dnicvf_ethtool.c319 int mbox, key, stat, q; in nicvf_get_regs() local
352 for (q = 0; q < MAX_CMP_QUEUES_PER_QS; q++) { in nicvf_get_regs()
353 p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_CFG, q); in nicvf_get_regs()
354 p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_CFG2, q); in nicvf_get_regs()
355 p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_THRESH, q); in nicvf_get_regs()
356 p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_BASE, q); in nicvf_get_regs()
357 p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, q); in nicvf_get_regs()
358 p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_TAIL, q); in nicvf_get_regs()
359 p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_DOOR, q); in nicvf_get_regs()
360 p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, q); in nicvf_get_regs()
[all …]
/linux-4.4.14/drivers/scsi/
Dscsi_lib.c123 struct request_queue *q = cmd->request->q; in scsi_mq_requeue_cmd() local
126 blk_mq_kick_requeue_list(q); in scsi_mq_requeue_cmd()
145 struct request_queue *q = device->request_queue; in __scsi_queue_insert() local
167 if (q->mq_ops) { in __scsi_queue_insert()
171 spin_lock_irqsave(q->queue_lock, flags); in __scsi_queue_insert()
172 blk_requeue_request(q, cmd->request); in __scsi_queue_insert()
174 spin_unlock_irqrestore(q->queue_lock, flags); in __scsi_queue_insert()
245 blk_execute_rq(req->q, NULL, req, 1); in scsi_execute()
329 static void scsi_kick_queue(struct request_queue *q) in scsi_kick_queue() argument
331 if (q->mq_ops) in scsi_kick_queue()
[all …]
Dgvp11.c213 unsigned char q, qq; in check_wd33c93() local
232 q = *sasr_3393; /* read it */ in check_wd33c93()
233 if (q & 0x08) /* bit 3 should always be clear */ in check_wd33c93()
240 if (*sasr_3393 != q) { /* should still read the same */ in check_wd33c93()
244 if (*scmd_3393 != q) /* and so should the image at 0x1f */ in check_wd33c93()
254 q = *scmd_3393; in check_wd33c93()
256 *scmd_3393 = ~q; in check_wd33c93()
260 *scmd_3393 = q; in check_wd33c93()
261 if (qq != q) /* should be read only */ in check_wd33c93()
264 q = *scmd_3393; in check_wd33c93()
[all …]
Dscsi_dh.c292 static struct scsi_device *get_sdev_from_queue(struct request_queue *q) in get_sdev_from_queue() argument
297 spin_lock_irqsave(q->queue_lock, flags); in get_sdev_from_queue()
298 sdev = q->queuedata; in get_sdev_from_queue()
301 spin_unlock_irqrestore(q->queue_lock, flags); in get_sdev_from_queue()
319 int scsi_dh_activate(struct request_queue *q, activate_complete fn, void *data) in scsi_dh_activate() argument
324 sdev = get_sdev_from_queue(q); in scsi_dh_activate()
366 int scsi_dh_set_params(struct request_queue *q, const char *params) in scsi_dh_set_params() argument
371 sdev = get_sdev_from_queue(q); in scsi_dh_set_params()
388 int scsi_dh_attach(struct request_queue *q, const char *name) in scsi_dh_attach() argument
394 sdev = get_sdev_from_queue(q); in scsi_dh_attach()
[all …]
/linux-4.4.14/drivers/net/ethernet/hisilicon/hns/
Dhns_dsaf_rcb.c70 void hns_rcb_reset_ring_hw(struct hnae_queue *q) in hns_rcb_reset_ring_hw() argument
80 tx_fbd_num = dsaf_read_dev(q, RCB_RING_TX_RING_FBDNUM_REG); in hns_rcb_reset_ring_hw()
84 dsaf_write_dev(q, RCB_RING_PREFETCH_EN_REG, 0); in hns_rcb_reset_ring_hw()
86 dsaf_write_dev(q, RCB_RING_T0_BE_RST, 1); in hns_rcb_reset_ring_hw()
89 could_ret = dsaf_read_dev(q, RCB_RING_COULD_BE_RST); in hns_rcb_reset_ring_hw()
93 dsaf_write_dev(q, RCB_RING_T0_BE_RST, 0); in hns_rcb_reset_ring_hw()
95 dsaf_write_dev(q, RCB_RING_T0_BE_RST, 1); in hns_rcb_reset_ring_hw()
98 could_ret = dsaf_read_dev(q, RCB_RING_COULD_BE_RST); in hns_rcb_reset_ring_hw()
103 dsaf_write_dev(q, RCB_RING_T0_BE_RST, 0); in hns_rcb_reset_ring_hw()
110 dev_err(q->dev->dev, "port%d reset ring fail\n", in hns_rcb_reset_ring_hw()
[all …]
Dhnae.c189 hnae_init_ring(struct hnae_queue *q, struct hnae_ring *ring, int flags) in hnae_init_ring() argument
196 ring->q = q; in hnae_init_ring()
232 static int hnae_init_queue(struct hnae_handle *h, struct hnae_queue *q, in hnae_init_queue() argument
237 q->dev = dev; in hnae_init_queue()
238 q->handle = h; in hnae_init_queue()
240 ret = hnae_init_ring(q, &q->tx_ring, q->tx_ring.flags | RINGF_DIR); in hnae_init_queue()
244 ret = hnae_init_ring(q, &q->rx_ring, q->rx_ring.flags & ~RINGF_DIR); in hnae_init_queue()
249 dev->ops->init_queue(q); in hnae_init_queue()
254 hnae_fini_ring(&q->tx_ring); in hnae_init_queue()
259 static void hnae_fini_queue(struct hnae_queue *q) in hnae_fini_queue() argument
[all …]
/linux-4.4.14/sound/core/
Dmisc.c121 const struct snd_pci_quirk *q; in snd_pci_quirk_lookup_id() local
123 for (q = list; q->subvendor; q++) { in snd_pci_quirk_lookup_id()
124 if (q->subvendor != vendor) in snd_pci_quirk_lookup_id()
126 if (!q->subdevice || in snd_pci_quirk_lookup_id()
127 (device & q->subdevice_mask) == q->subdevice) in snd_pci_quirk_lookup_id()
128 return q; in snd_pci_quirk_lookup_id()
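The snd_pci_quirk_lookup_id() hits above walk a sentinel-terminated table, matching on subvendor and then on subdevice only under the entry's mask, with a zero subdevice acting as a wildcard. A stand-alone sketch of that lookup, with simplified types and made-up table values:

    #include <stdint.h>
    #include <stdio.h>

    struct pci_quirk {
        uint16_t subvendor;
        uint16_t subdevice;
        uint16_t subdevice_mask;
        const char *name;
    };

    static const struct pci_quirk *
    quirk_lookup_id(uint16_t vendor, uint16_t device, const struct pci_quirk *list)
    {
        const struct pci_quirk *q;

        for (q = list; q->subvendor; q++) {      /* subvendor == 0 terminates */
            if (q->subvendor != vendor)
                continue;
            if (!q->subdevice ||
                (device & q->subdevice_mask) == q->subdevice)
                return q;
        }
        return NULL;
    }

    int main(void)
    {
        static const struct pci_quirk table[] = {
            { 0x1043, 0x8100, 0xff00, "family match under mask" },
            { 0x1043, 0x0000, 0x0000, "vendor-wide fallback" },
            { 0 }                                /* sentinel */
        };
        const struct pci_quirk *q = quirk_lookup_id(0x1043, 0x81a3, table);

        printf("%s\n", q ? q->name : "no match");   /* family match under mask */
        return 0;
    }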
/linux-4.4.14/drivers/mmc/card/
Dqueue.c29 static int mmc_prep_request(struct request_queue *q, struct request *req) in mmc_prep_request() argument
31 struct mmc_queue *mq = q->queuedata; in mmc_prep_request()
52 struct request_queue *q = mq->queue; in mmc_queue_thread() local
61 spin_lock_irq(q->queue_lock); in mmc_queue_thread()
63 req = blk_fetch_request(q); in mmc_queue_thread()
65 spin_unlock_irq(q->queue_lock); in mmc_queue_thread()
111 static void mmc_request_fn(struct request_queue *q) in mmc_request_fn() argument
113 struct mmc_queue *mq = q->queuedata; in mmc_request_fn()
119 while ((req = blk_fetch_request(q)) != NULL) { in mmc_request_fn()
158 static void mmc_queue_setup_discard(struct request_queue *q, in mmc_queue_setup_discard() argument
[all …]
/linux-4.4.14/include/net/
Dinet_frag.h97 bool (*match)(const struct inet_frag_queue *q,
99 void (*constructor)(struct inet_frag_queue *q,
122 void inet_frag_kill(struct inet_frag_queue *q, struct inet_frags *f);
123 void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f);
127 void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
130 static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f) in inet_frag_put() argument
132 if (atomic_dec_and_test(&q->refcnt)) in inet_frag_put()
133 inet_frag_destroy(q, f); in inet_frag_put()
136 static inline bool inet_frag_evicting(struct inet_frag_queue *q) in inet_frag_evicting() argument
138 return !hlist_unhashed(&q->list_evictor); in inet_frag_evicting()
Dpkt_sched.h19 static inline void *qdisc_priv(struct Qdisc *q) in qdisc_priv() argument
21 return (char *) q + QDISC_ALIGN(sizeof(struct Qdisc)); in qdisc_priv()
83 int fifo_set_limit(struct Qdisc *q, unsigned int limit);
92 void qdisc_list_add(struct Qdisc *q);
93 void qdisc_list_del(struct Qdisc *q);
101 int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
105 void __qdisc_run(struct Qdisc *q);
107 static inline void qdisc_run(struct Qdisc *q) in qdisc_run() argument
109 if (qdisc_run_begin(q)) in qdisc_run()
110 __qdisc_run(q); in qdisc_run()
Dsch_generic.h74 struct Qdisc *q);
94 struct sk_buff_head q; member
248 struct Qdisc *q; member
270 static inline int qdisc_qlen(const struct Qdisc *q) in qdisc_qlen() argument
272 return q->q.qlen; in qdisc_qlen()
282 return &qdisc->q.lock; in qdisc_lock()
287 struct Qdisc *q = rcu_dereference_rtnl(qdisc->dev_queue->qdisc); in qdisc_root() local
289 return q; in qdisc_root()
329 static inline void sch_tree_lock(const struct Qdisc *q) in sch_tree_lock() argument
331 spin_lock_bh(qdisc_root_sleeping_lock(q)); in sch_tree_lock()
[all …]
/linux-4.4.14/include/crypto/
Db128ops.h64 static inline void u128_xor(u128 *r, const u128 *p, const u128 *q) in u128_xor() argument
66 r->a = p->a ^ q->a; in u128_xor()
67 r->b = p->b ^ q->b; in u128_xor()
70 static inline void be128_xor(be128 *r, const be128 *p, const be128 *q) in be128_xor() argument
72 u128_xor((u128 *)r, (u128 *)p, (u128 *)q); in be128_xor()
75 static inline void le128_xor(le128 *r, const le128 *p, const le128 *q) in le128_xor() argument
77 u128_xor((u128 *)r, (u128 *)p, (u128 *)q); in le128_xor()
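The b128ops.h helpers above treat a 128-bit block as two 64-bit halves; because XOR is purely bitwise, the byte order of the halves (be128 vs. le128) never affects the result. A trivial stand-alone version of the same helper:

    #include <stdint.h>
    #include <stdio.h>

    typedef struct { uint64_t a, b; } u128;

    /* XOR two 128-bit blocks half by half, as the kernel helpers do. */
    static void u128_xor(u128 *r, const u128 *p, const u128 *q)
    {
        r->a = p->a ^ q->a;
        r->b = p->b ^ q->b;
    }

    int main(void)
    {
        u128 p = { 0x0123456789abcdefULL, 0xfedcba9876543210ULL };
        u128 q = { 0xffffffffffffffffULL, 0x0f0f0f0f0f0f0f0fULL };
        u128 r;

        u128_xor(&r, &p, &q);
        printf("%016llx %016llx\n",
               (unsigned long long)r.a, (unsigned long long)r.b);
        return 0;
    }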
/linux-4.4.14/drivers/media/pci/cx88/
Dcx88-vbi.c47 struct cx88_dmaqueue *q, in cx8800_start_vbi_dma() argument
62 q->count = 0; in cx8800_start_vbi_dma()
94 struct cx88_dmaqueue *q) in cx8800_restart_vbi_queue() argument
98 if (list_empty(&q->active)) in cx8800_restart_vbi_queue()
101 buf = list_entry(q->active.next, struct cx88_buffer, list); in cx8800_restart_vbi_queue()
104 cx8800_start_vbi_dma(dev, q, buf); in cx8800_restart_vbi_queue()
110 static int queue_setup(struct vb2_queue *q, const void *parg, in queue_setup() argument
114 struct cx8800_dev *dev = q->drv_priv; in queue_setup()
169 struct cx88_dmaqueue *q = &dev->vbiq; in buffer_queue() local
176 if (list_empty(&q->active)) { in buffer_queue()
[all …]
/linux-4.4.14/drivers/block/
Dnull_blk.c35 struct request_queue *q; member
220 struct request_queue *q = NULL; in end_cmd() local
223 q = cmd->rq->q; in end_cmd()
241 if (queue_mode == NULL_Q_RQ && blk_queue_stopped(q)) { in end_cmd()
244 spin_lock_irqsave(q->queue_lock, flags); in end_cmd()
245 blk_start_queue_async(q); in end_cmd()
246 spin_unlock_irqrestore(q->queue_lock, flags); in end_cmd()
311 static blk_qc_t null_queue_bio(struct request_queue *q, struct bio *bio) in null_queue_bio() argument
313 struct nullb *nullb = q->queuedata; in null_queue_bio()
324 static int null_rq_prep_fn(struct request_queue *q, struct request *req) in null_rq_prep_fn() argument
[all …]
/linux-4.4.14/drivers/pcmcia/
Dcistpl.c664 u_char *p, *q; in parse_device() local
667 q = p + tuple->TupleDataLen; in parse_device()
693 if (++p == q) in parse_device()
697 if (++p == q) in parse_device()
704 if (++p == q) in parse_device()
713 if (++p == q) in parse_device()
762 static int parse_strings(u_char *p, u_char *q, int max, in parse_strings() argument
767 if (p == q) in parse_strings()
779 if (++p == q) in parse_strings()
782 if ((*p == 0xff) || (++p == q)) in parse_strings()
[all …]
Drsrc_nonstatic.c113 struct resource_map *p, *q; in add_interval() local
123 q = kmalloc(sizeof(struct resource_map), GFP_KERNEL); in add_interval()
124 if (!q) { in add_interval()
128 q->base = base; q->num = num; in add_interval()
129 q->next = p->next; p->next = q; in add_interval()
137 struct resource_map *p, *q; in sub_interval() local
139 for (p = map; ; p = q) { in sub_interval()
140 q = p->next; in sub_interval()
141 if (q == map) in sub_interval()
143 if ((q->base+q->num > base) && (base+num > q->base)) { in sub_interval()
[all …]
/linux-4.4.14/drivers/ide/
Dide-pm.c48 struct request_queue *q = rq->q; in ide_pm_execute_rq() local
54 spin_lock_irq(q->queue_lock); in ide_pm_execute_rq()
55 if (unlikely(blk_queue_dying(q))) { in ide_pm_execute_rq()
59 spin_unlock_irq(q->queue_lock); in ide_pm_execute_rq()
62 __elv_add_request(q, rq, ELEVATOR_INSERT_FRONT); in ide_pm_execute_rq()
63 __blk_run_queue_uncond(q); in ide_pm_execute_rq()
64 spin_unlock_irq(q->queue_lock); in ide_pm_execute_rq()
214 struct request_queue *q = drive->queue; in ide_complete_pm_rq() local
226 spin_lock_irqsave(q->queue_lock, flags); in ide_complete_pm_rq()
228 blk_stop_queue(q); in ide_complete_pm_rq()
[all …]
/linux-4.4.14/drivers/media/pci/cx18/
Dcx18-queue.c50 void cx18_queue_init(struct cx18_queue *q) in cx18_queue_init() argument
52 INIT_LIST_HEAD(&q->list); in cx18_queue_init()
53 atomic_set(&q->depth, 0); in cx18_queue_init()
54 q->bytesused = 0; in cx18_queue_init()
58 struct cx18_queue *q, int to_front) in _cx18_enqueue() argument
61 if (q != &s->q_full) { in _cx18_enqueue()
70 if (q == &s->q_busy && in _cx18_enqueue()
71 atomic_read(&q->depth) >= CX18_MAX_FW_MDLS_PER_STREAM) in _cx18_enqueue()
72 q = &s->q_free; in _cx18_enqueue()
74 spin_lock(&q->lock); in _cx18_enqueue()
[all …]
Dcx18-queue.h70 struct cx18_queue *q, int to_front);
74 struct cx18_queue *q) in cx18_enqueue() argument
76 return _cx18_enqueue(s, mdl, q, 0); /* FIFO */ in cx18_enqueue()
81 struct cx18_queue *q) in cx18_push() argument
83 return _cx18_enqueue(s, mdl, q, 1); /* LIFO */ in cx18_push()
86 void cx18_queue_init(struct cx18_queue *q);
87 struct cx18_mdl *cx18_dequeue(struct cx18_stream *s, struct cx18_queue *q);
/linux-4.4.14/sound/oss/
Dmidibuf.c58 #define DATA_AVAIL(q) (q->len) argument
59 #define SPACE_AVAIL(q) (MAX_QUEUE_SIZE - q->len) argument
61 #define QUEUE_BYTE(q, data) \ argument
62 if (SPACE_AVAIL(q)) \
66 q->queue[q->tail] = (data); \
67 q->len++; q->tail = (q->tail+1) % MAX_QUEUE_SIZE; \
71 #define REMOVE_BYTE(q, data) \ argument
72 if (DATA_AVAIL(q)) \
76 data = q->queue[q->head]; \
77 q->len--; q->head = (q->head+1) % MAX_QUEUE_SIZE; \
Dsequencer.c360 static int extended_event(unsigned char *q) in extended_event() argument
362 int dev = q[2]; in extended_event()
370 switch (q[1]) in extended_event()
373 synth_devs[dev]->kill_note(dev, q[3], q[4], q[5]); in extended_event()
377 if (q[4] > 127 && q[4] != 255) in extended_event()
380 if (q[5] == 0) in extended_event()
382 synth_devs[dev]->kill_note(dev, q[3], q[4], q[5]); in extended_event()
385 synth_devs[dev]->start_note(dev, q[3], q[4], q[5]); in extended_event()
389 synth_devs[dev]->set_instr(dev, q[3], q[4]); in extended_event()
393 synth_devs[dev]->aftertouch(dev, q[3], q[4]); in extended_event()
[all …]
/linux-4.4.14/arch/mips/math-emu/
Dsp_sqrt.c26 int ix, s, q, m, t, i; in ieee754sp_sqrt() local
85 q = s = 0; /* q = sqrt(x) */ in ieee754sp_sqrt()
93 q += r; in ieee754sp_sqrt()
103 q += 2; in ieee754sp_sqrt()
106 q += (q & 1); in ieee754sp_sqrt()
110 ix = (q >> 1) + 0x3f000000; in ieee754sp_sqrt()
/linux-4.4.14/fs/xfs/
Dxfs_qm_syscalls.c55 struct xfs_quotainfo *q = mp->m_quotainfo; in xfs_qm_scall_quotaoff() local
78 ASSERT(q); in xfs_qm_scall_quotaoff()
79 mutex_lock(&q->qi_quotaofflock); in xfs_qm_scall_quotaoff()
90 mutex_unlock(&q->qi_quotaofflock); in xfs_qm_scall_quotaoff()
195 mutex_unlock(&q->qi_quotaofflock); in xfs_qm_scall_quotaoff()
203 if ((dqtype & XFS_QMOPT_UQUOTA) && q->qi_uquotaip) { in xfs_qm_scall_quotaoff()
204 IRELE(q->qi_uquotaip); in xfs_qm_scall_quotaoff()
205 q->qi_uquotaip = NULL; in xfs_qm_scall_quotaoff()
207 if ((dqtype & XFS_QMOPT_GQUOTA) && q->qi_gquotaip) { in xfs_qm_scall_quotaoff()
208 IRELE(q->qi_gquotaip); in xfs_qm_scall_quotaoff()
[all …]
Dxfs_quotaops.c39 struct xfs_quotainfo *q = mp->m_quotainfo; in xfs_qm_fill_state() local
53 tstate->spc_timelimit = q->qi_btimelimit; in xfs_qm_fill_state()
54 tstate->ino_timelimit = q->qi_itimelimit; in xfs_qm_fill_state()
55 tstate->rt_spc_timelimit = q->qi_rtbtimelimit; in xfs_qm_fill_state()
56 tstate->spc_warnlimit = q->qi_bwarnlimit; in xfs_qm_fill_state()
57 tstate->ino_warnlimit = q->qi_iwarnlimit; in xfs_qm_fill_state()
58 tstate->rt_spc_warnlimit = q->qi_rtbwarnlimit; in xfs_qm_fill_state()
73 struct xfs_quotainfo *q = mp->m_quotainfo; in xfs_fs_get_quota_state() local
78 state->s_incoredqs = q->qi_dquots; in xfs_fs_get_quota_state()
92 xfs_qm_fill_state(&state->s_state[USRQUOTA], mp, q->qi_uquotaip, in xfs_fs_get_quota_state()
[all …]
/linux-4.4.14/net/sunrpc/
Dsched.c103 struct list_head *q = &queue->tasks[queue->priority]; in rpc_rotate_queue_owner() local
106 if (!list_empty(q)) { in rpc_rotate_queue_owner()
107 task = list_first_entry(q, struct rpc_task, u.tk_wait.list); in rpc_rotate_queue_owner()
109 list_move_tail(&task->u.tk_wait.list, q); in rpc_rotate_queue_owner()
141 struct list_head *q; in __rpc_add_wait_queue_priority() local
149 q = &queue->tasks[queue_priority]; in __rpc_add_wait_queue_priority()
150 list_for_each_entry(t, q, u.tk_wait.list) { in __rpc_add_wait_queue_priority()
156 list_add_tail(&task->u.tk_wait.list, q); in __rpc_add_wait_queue_priority()
352 static void __rpc_sleep_on_priority(struct rpc_wait_queue *q, in __rpc_sleep_on_priority() argument
358 task->tk_pid, rpc_qname(q), jiffies); in __rpc_sleep_on_priority()
[all …]
/linux-4.4.14/drivers/gpu/drm/radeon/
Dradeon_mem.c118 struct mem_block *q = p->next; in free_block() local
119 p->size += q->size; in free_block()
120 p->next = q->next; in free_block()
122 kfree(q); in free_block()
126 struct mem_block *q = p->prev; in free_block() local
127 q->size += p->size; in free_block()
128 q->next = p->next; in free_block()
129 q->next->prev = q; in free_block()
178 struct mem_block *q = p->next; in radeon_mem_release() local
179 p->size += q->size; in radeon_mem_release()
[all …]
/linux-4.4.14/drivers/infiniband/hw/mthca/
Dmthca_mad.c295 int p, q; in mthca_create_agents() local
301 for (q = 0; q <= 1; ++q) { in mthca_create_agents()
303 q ? IB_QPT_GSI : IB_QPT_SMI, in mthca_create_agents()
310 dev->send_agent[p][q] = agent; in mthca_create_agents()
327 for (q = 0; q <= 1; ++q) in mthca_create_agents()
328 if (dev->send_agent[p][q]) in mthca_create_agents()
329 ib_unregister_mad_agent(dev->send_agent[p][q]); in mthca_create_agents()
337 int p, q; in mthca_free_agents() local
340 for (q = 0; q <= 1; ++q) { in mthca_free_agents()
341 agent = dev->send_agent[p][q]; in mthca_free_agents()
[all …]
/linux-4.4.14/drivers/media/pci/ivtv/
Divtv-queue.c44 void ivtv_queue_init(struct ivtv_queue *q) in ivtv_queue_init() argument
46 INIT_LIST_HEAD(&q->list); in ivtv_queue_init()
47 q->buffers = 0; in ivtv_queue_init()
48 q->length = 0; in ivtv_queue_init()
49 q->bytesused = 0; in ivtv_queue_init()
52 void ivtv_enqueue(struct ivtv_stream *s, struct ivtv_buffer *buf, struct ivtv_queue *q) in ivtv_enqueue() argument
57 if (q == &s->q_free) { in ivtv_enqueue()
64 list_add_tail(&buf->list, &q->list); in ivtv_enqueue()
65 q->buffers++; in ivtv_enqueue()
66 q->length += s->buf_size; in ivtv_enqueue()
[all …]
/linux-4.4.14/tools/power/cpupower/utils/helpers/
Dbitmask.c104 static const char *nexttoken(const char *q, int sep) in nexttoken() argument
106 if (q) in nexttoken()
107 q = strchr(q, sep); in nexttoken()
108 if (q) in nexttoken()
109 q++; in nexttoken()
110 return q; in nexttoken()
193 const char *p, *q; in bitmask_parselist() local
197 q = buf; in bitmask_parselist()
198 while (p = q, q = nexttoken(q, ','), p) { in bitmask_parselist()
/linux-4.4.14/ipc/
Dsem.c208 struct sem_queue *q, *tq; in unmerge_queues() local
218 list_for_each_entry_safe(q, tq, &sma->pending_alter, list) { in unmerge_queues()
220 curr = &sma->sem_base[q->sops[0].sem_num]; in unmerge_queues()
222 list_add_tail(&q->list, &curr->pending_alter); in unmerge_queues()
613 static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q) in perform_atomic_semop() argument
621 sops = q->sops; in perform_atomic_semop()
622 nsops = q->nsops; in perform_atomic_semop()
623 un = q->undo; in perform_atomic_semop()
651 pid = q->pid; in perform_atomic_semop()
664 q->blocking = sop; in perform_atomic_semop()
[all …]
/linux-4.4.14/net/8021q/
DMakefile5 obj-$(CONFIG_VLAN_8021Q) += 8021q.o
7 8021q-y := vlan.o vlan_dev.o vlan_netlink.o
8 8021q-$(CONFIG_VLAN_8021Q_GVRP) += vlan_gvrp.o
9 8021q-$(CONFIG_VLAN_8021Q_MVRP) += vlan_mvrp.o
10 8021q-$(CONFIG_PROC_FS) += vlanproc.o
/linux-4.4.14/drivers/scsi/lpfc/
Dlpfc_debugfs.h290 lpfc_debug_dump_qe(struct lpfc_queue *q, uint32_t idx) in lpfc_debug_dump_qe() argument
297 if (!q) in lpfc_debug_dump_qe()
299 if (idx >= q->entry_count) in lpfc_debug_dump_qe()
302 esize = q->entry_size; in lpfc_debug_dump_qe()
304 pword = q->qe[idx].address; in lpfc_debug_dump_qe()
338 lpfc_debug_dump_q(struct lpfc_queue *q) in lpfc_debug_dump_q() argument
343 if (!q) in lpfc_debug_dump_q()
346 dev_printk(KERN_ERR, &(((q->phba))->pcidev)->dev, in lpfc_debug_dump_q()
350 (q->phba)->brd_no, in lpfc_debug_dump_q()
351 q->queue_id, q->type, q->subtype, in lpfc_debug_dump_q()
[all …]
/linux-4.4.14/drivers/hid/usbhid/
Dhid-quirks.c183 struct quirks_list_struct *q; in usbhid_exists_dquirk() local
186 list_for_each_entry(q, &dquirks_list, node) { in usbhid_exists_dquirk()
187 if (q->hid_bl_item.idVendor == idVendor && in usbhid_exists_dquirk()
188 q->hid_bl_item.idProduct == idProduct) { in usbhid_exists_dquirk()
189 bl_entry = &q->hid_bl_item; in usbhid_exists_dquirk()
219 struct quirks_list_struct *q_new, *q; in usbhid_modify_dquirk() local
239 list_for_each_entry(q, &dquirks_list, node) { in usbhid_modify_dquirk()
241 if (q->hid_bl_item.idVendor == idVendor && in usbhid_modify_dquirk()
242 q->hid_bl_item.idProduct == idProduct) { in usbhid_modify_dquirk()
244 list_replace(&q->node, &q_new->node); in usbhid_modify_dquirk()
[all …]
/linux-4.4.14/drivers/media/pci/cx23885/
Dcx23885-vbi.c96 struct cx23885_dmaqueue *q, in cx23885_start_vbi_dma() argument
109 q->count = 0; in cx23885_start_vbi_dma()
124 static int queue_setup(struct vb2_queue *q, const void *parg, in queue_setup() argument
128 struct cx23885_dev *dev = q->drv_priv; in queue_setup()
200 struct cx23885_dmaqueue *q = &dev->vbiq; in buffer_queue() local
208 if (list_empty(&q->active)) { in buffer_queue()
210 list_add_tail(&buf->queue, &q->active); in buffer_queue()
217 prev = list_entry(q->active.prev, struct cx23885_buffer, in buffer_queue()
220 list_add_tail(&buf->queue, &q->active); in buffer_queue()
228 static int cx23885_start_streaming(struct vb2_queue *q, unsigned int count) in cx23885_start_streaming() argument
[all …]
Dcx23885-video.c98 struct cx23885_dmaqueue *q, u32 count) in cx23885_video_wakeup() argument
102 if (list_empty(&q->active)) in cx23885_video_wakeup()
104 buf = list_entry(q->active.next, in cx23885_video_wakeup()
107 buf->vb.sequence = q->count++; in cx23885_video_wakeup()
110 buf->vb.vb2_buf.index, count, q->count); in cx23885_video_wakeup()
291 struct cx23885_dmaqueue *q, in cx23885_start_video_dma() argument
305 q->count = 0; in cx23885_start_video_dma()
318 static int queue_setup(struct vb2_queue *q, const void *parg, in queue_setup() argument
322 struct cx23885_dev *dev = q->drv_priv; in queue_setup()
448 struct cx23885_dmaqueue *q = &dev->vidq; in buffer_queue() local
[all …]
/linux-4.4.14/drivers/net/usb/
Dcatc.c190 void (*callback)(struct catc *catc, struct ctrl_queue *q);
483 struct ctrl_queue *q = catc->ctrl_queue + catc->ctrl_tail; in catc_ctrl_run() local
489 dr->bRequest = q->request; in catc_ctrl_run()
490 dr->bRequestType = 0x40 | q->dir; in catc_ctrl_run()
491 dr->wValue = cpu_to_le16(q->value); in catc_ctrl_run()
492 dr->wIndex = cpu_to_le16(q->index); in catc_ctrl_run()
493 dr->wLength = cpu_to_le16(q->len); in catc_ctrl_run()
495 urb->pipe = q->dir ? usb_rcvctrlpipe(usbdev, 0) : usb_sndctrlpipe(usbdev, 0); in catc_ctrl_run()
496 urb->transfer_buffer_length = q->len; in catc_ctrl_run()
501 if (!q->dir && q->buf && q->len) in catc_ctrl_run()
[all …]
/linux-4.4.14/drivers/media/platform/vivid/
Dvivid-core.c661 struct vb2_queue *q; in vivid_create_instance() local
1031 q = &dev->vb_vid_cap_q; in vivid_create_instance()
1032 q->type = dev->multiplanar ? V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE : in vivid_create_instance()
1034 q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ; in vivid_create_instance()
1035 q->drv_priv = dev; in vivid_create_instance()
1036 q->buf_struct_size = sizeof(struct vivid_buffer); in vivid_create_instance()
1037 q->ops = &vivid_vid_cap_qops; in vivid_create_instance()
1038 q->mem_ops = &vb2_vmalloc_memops; in vivid_create_instance()
1039 q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; in vivid_create_instance()
1040 q->min_buffers_needed = 2; in vivid_create_instance()
[all …]
/linux-4.4.14/arch/x86/lib/
Dmsr.c42 m->q = val; in msr_read()
55 return wrmsrl_safe(msr, m->q); in msr_write()
72 m1.q |= BIT_64(bit); in __flip_bit()
74 m1.q &= ~BIT_64(bit); in __flip_bit()
76 if (m1.q == m.q) in __flip_bit()
Dmsr-smp.c50 int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q) in rdmsrl_on_cpu() argument
59 *q = rv.reg.q; in rdmsrl_on_cpu()
81 int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q) in wrmsrl_on_cpu() argument
89 rv.reg.q = q; in wrmsrl_on_cpu()
193 int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q) in wrmsrl_safe_on_cpu() argument
201 rv.reg.q = q; in wrmsrl_safe_on_cpu()
209 int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q) in rdmsrl_safe_on_cpu() argument
218 *q = rv.reg.q; in rdmsrl_safe_on_cpu()
/linux-4.4.14/drivers/char/
Dapm-emulation.c172 static inline int queue_empty(struct apm_queue *q) in queue_empty() argument
174 return q->event_head == q->event_tail; in queue_empty()
177 static inline apm_event_t queue_get_event(struct apm_queue *q) in queue_get_event() argument
179 q->event_tail = (q->event_tail + 1) % APM_MAX_EVENTS; in queue_get_event()
180 return q->events[q->event_tail]; in queue_get_event()
183 static void queue_add_event(struct apm_queue *q, apm_event_t event) in queue_add_event() argument
185 q->event_head = (q->event_head + 1) % APM_MAX_EVENTS; in queue_add_event()
186 if (q->event_head == q->event_tail) { in queue_add_event()
191 q->event_tail = (q->event_tail + 1) % APM_MAX_EVENTS; in queue_add_event()
193 q->events[q->event_head] = event; in queue_add_event()
/linux-4.4.14/tools/testing/selftests/timers/
Dmqueue-lat.c72 mqd_t q; in mqueue_lat_test() local
77 q = mq_open("/foo", O_CREAT | O_RDONLY, 0666, NULL); in mqueue_lat_test()
78 if (q < 0) { in mqueue_lat_test()
82 mq_getattr(q, &attr); in mqueue_lat_test()
95 ret = mq_timedreceive(q, buf, sizeof(buf), NULL, &target); in mqueue_lat_test()
103 mq_close(q); in mqueue_lat_test()
/linux-4.4.14/drivers/staging/unisys/visorbus/
Dvisorchannel.c319 #define SIG_QUEUE_OFFSET(chan_hdr, q) \ argument
321 ((q) * sizeof(struct signal_queue_header)))
326 #define SIG_DATA_OFFSET(chan_hdr, q, sig_hdr, slot) \ argument
327 (SIG_QUEUE_OFFSET(chan_hdr, q) + (sig_hdr)->sig_base_offset + \
542 sigqueue_debug(struct signal_queue_header *q, int which, struct seq_file *seq) in sigqueue_debug() argument
545 seq_printf(seq, " VersionId = %lu\n", (ulong)q->version); in sigqueue_debug()
546 seq_printf(seq, " Type = %lu\n", (ulong)q->chtype); in sigqueue_debug()
548 (long long)q->sig_base_offset); in sigqueue_debug()
549 seq_printf(seq, " SignalSize = %lu\n", (ulong)q->signal_size); in sigqueue_debug()
551 (ulong)q->max_slots); in sigqueue_debug()
[all …]
/linux-4.4.14/arch/x86/include/asm/
Dmsr.h19 u64 q; member
251 int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
252 int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
257 int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
258 int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
272 static inline int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q) in rdmsrl_on_cpu() argument
274 rdmsrl(msr_no, *q); in rdmsrl_on_cpu()
277 static inline int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q) in wrmsrl_on_cpu() argument
279 wrmsrl(msr_no, q); in wrmsrl_on_cpu()
301 static inline int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q) in rdmsrl_safe_on_cpu() argument
[all …]
/linux-4.4.14/drivers/media/pci/saa7134/
Dsaa7134-core.c278 struct saa7134_dmaqueue *q, in saa7134_buffer_queue() argument
286 if (NULL == q->curr) { in saa7134_buffer_queue()
287 if (!q->need_two) { in saa7134_buffer_queue()
288 q->curr = buf; in saa7134_buffer_queue()
290 } else if (list_empty(&q->queue)) { in saa7134_buffer_queue()
291 list_add_tail(&buf->entry, &q->queue); in saa7134_buffer_queue()
293 next = list_entry(q->queue.next, struct saa7134_buf, in saa7134_buffer_queue()
295 q->curr = buf; in saa7134_buffer_queue()
299 list_add_tail(&buf->entry, &q->queue); in saa7134_buffer_queue()
306 struct saa7134_dmaqueue *q, in saa7134_buffer_finish() argument
[all …]
Dsaa7134-empress.c256 struct vb2_queue *q; in empress_init() local
282 q = &dev->empress_vbq; in empress_init()
283 q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; in empress_init()
289 q->io_modes = VB2_MMAP | VB2_READ; in empress_init()
290 q->drv_priv = &dev->ts_q; in empress_init()
291 q->ops = &saa7134_empress_qops; in empress_init()
292 q->gfp_flags = GFP_DMA32; in empress_init()
293 q->mem_ops = &vb2_dma_sg_memops; in empress_init()
294 q->buf_struct_size = sizeof(struct saa7134_buf); in empress_init()
295 q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; in empress_init()
[all …]
/linux-4.4.14/arch/mips/bmips/
Dsetup.c143 const struct bmips_quirk *q; in plat_mem_setup() local
161 for (q = bmips_quirk_list; q->quirk_fn; q++) { in plat_mem_setup()
163 q->compatible)) { in plat_mem_setup()
164 q->quirk_fn(); in plat_mem_setup()
