Lines Matching refs:inst
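(This is a cross-reference listing for the identifier inst. Judging by the function names, the fragments come from the TI Keystone Navigator QMSS driver, drivers/soc/ti/knav_qmss_queue.c in the Linux kernel; the leading number on each line is that file's line number, and the trailing "argument"/"local"/"in fn()" tags are the indexer's classification of each reference. The short sketches interleaved below reconstruct the surrounding code from these fragments and are hedged accordingly.)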

63 #define for_each_handle_rcu(qh, inst)			\ argument
64 list_for_each_entry_rcu(qh, &inst->handles, list)
66 #define for_each_instance(idx, inst, kdev) \ argument
67 for (idx = 0, inst = kdev->instances; \
69 idx++, inst = knav_queue_idx_to_inst(kdev, idx))
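Taken together, lines 63-69 define the two iterators the rest of the file leans on. The listing elides line 68 (it does not mention inst), so the loop bound below is an assumption; a reconstructed sketch:

#define for_each_handle_rcu(qh, inst)				\
	list_for_each_entry_rcu(qh, &inst->handles, list)

#define for_each_instance(idx, inst, kdev)			\
	for (idx = 0, inst = kdev->instances;			\
	     idx < kdev->num_queues_in_use;			\
	     idx++, inst = knav_queue_idx_to_inst(kdev, idx))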
76 void knav_queue_notify(struct knav_queue_inst *inst) in knav_queue_notify() argument
80 if (!inst) in knav_queue_notify()
84 for_each_handle_rcu(qh, inst) { in knav_queue_notify()
98 struct knav_queue_inst *inst = _instdata; in knav_queue_int_handler() local
100 knav_queue_notify(inst); in knav_queue_int_handler()
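Lines 76-100 suggest the flow: the per-queue interrupt handler simply forwards to knav_queue_notify(), which fans out to every open handle under RCU. A minimal sketch of that fan-out, assuming a notifier_fn/notifier_fn_arg callback pair on struct knav_queue (fields the listing never shows):

void knav_queue_notify(struct knav_queue_inst *inst)
{
	struct knav_queue *qh;

	if (!inst)
		return;

	rcu_read_lock();
	for_each_handle_rcu(qh, inst) {
		/* deliver to each handle's registered callback, if armed */
		if (qh->notifier_fn)
			qh->notifier_fn(qh->notifier_fn_arg);
	}
	rcu_read_unlock();
}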
105 struct knav_queue_inst *inst) in knav_queue_setup_irq() argument
107 unsigned queue = inst->id - range->queue_base; in knav_queue_setup_irq()
115 inst->irq_name, inst); in knav_queue_setup_irq()
131 static void knav_queue_free_irq(struct knav_queue_inst *inst) in knav_queue_free_irq() argument
133 struct knav_range_info *range = inst->range; in knav_queue_free_irq()
134 unsigned queue = inst->id - inst->range->queue_base; in knav_queue_free_irq()
140 free_irq(irq, inst); in knav_queue_free_irq()
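Lines 105-140 pair up: the instance pointer goes to request_irq() as the dev_id cookie (the trailing arguments on line 115) and must therefore be passed back to free_irq() (line 140). A sketch of the setup side; the range->irqs[] lookup is an assumption, since the listing omits the non-inst lines:

static int knav_queue_setup_irq(struct knav_range_info *range,
				struct knav_queue_inst *inst)
{
	unsigned queue = inst->id - range->queue_base;
	int ret = 0, irq;

	if (range->flags & RANGE_HAS_IRQ) {
		irq = range->irqs[queue].irq;	/* field layout assumed */
		ret = request_irq(irq, knav_queue_int_handler, 0,
				  inst->irq_name, inst);
		if (!ret)
			inst->irq_num = irq;
	}
	return ret;
}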
144 static inline bool knav_queue_is_busy(struct knav_queue_inst *inst) in knav_queue_is_busy() argument
146 return !list_empty(&inst->handles); in knav_queue_is_busy()
149 static inline bool knav_queue_is_reserved(struct knav_queue_inst *inst) in knav_queue_is_reserved() argument
151 return inst->range->flags & RANGE_RESERVED; in knav_queue_is_reserved()
154 static inline bool knav_queue_is_shared(struct knav_queue_inst *inst) in knav_queue_is_shared() argument
159 for_each_handle_rcu(tmp, inst) { in knav_queue_is_shared()
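The three predicates at lines 144-159 gate the open paths below: a queue is busy once any handle is linked in, reserved if its range carries RANGE_RESERVED, and shared only if its handles were opened with the shared flag. A sketch of the last one, assuming a flags word on the handle:

static inline bool knav_queue_is_shared(struct knav_queue_inst *inst)
{
	struct knav_queue *tmp;
	bool ret = true;

	rcu_read_lock();
	for_each_handle_rcu(tmp, inst) {
		if (!(tmp->flags & KNAV_QUEUE_SHARED)) {
			ret = false;
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}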
169 static inline bool knav_queue_match_type(struct knav_queue_inst *inst, in knav_queue_match_type() argument
173 (inst->range->flags & RANGE_HAS_IRQ)) { in knav_queue_match_type()
176 (inst->range->flags & RANGE_HAS_ACCUMULATOR)) { in knav_queue_match_type()
179 !(inst->range->flags & in knav_queue_match_type()
189 struct knav_queue_inst *inst; in knav_queue_match_id_to_inst() local
192 for_each_instance(idx, inst, kdev) { in knav_queue_match_id_to_inst()
193 if (inst->id == id) in knav_queue_match_id_to_inst()
194 return inst; in knav_queue_match_id_to_inst()
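Lines 169-194 implement matching: by capability flags for open-by-type, and by plain id for open-by-id. Reading the flag tests at lines 173-179 back into full form gives roughly the following (the KNAV_QUEUE_QPEND/ACC/GP constants come from include/linux/soc/ti/knav_qmss.h):

static inline bool knav_queue_match_type(struct knav_queue_inst *inst,
					 unsigned type)
{
	if ((type == KNAV_QUEUE_QPEND) &&
	    (inst->range->flags & RANGE_HAS_IRQ))
		return true;
	if ((type == KNAV_QUEUE_ACC) &&
	    (inst->range->flags & RANGE_HAS_ACCUMULATOR))
		return true;
	if ((type == KNAV_QUEUE_GP) &&
	    !(inst->range->flags &
	      (RANGE_HAS_ACCUMULATOR | RANGE_HAS_IRQ)))
		return true;
	return false;
}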
209 static struct knav_queue *__knav_queue_open(struct knav_queue_inst *inst, in __knav_queue_open() argument
216 qh = devm_kzalloc(inst->kdev->dev, sizeof(*qh), GFP_KERNEL); in __knav_queue_open()
221 qh->inst = inst; in __knav_queue_open()
222 id = inst->id - inst->qmgr->start_queue; in __knav_queue_open()
223 qh->reg_push = &inst->qmgr->reg_push[id]; in __knav_queue_open()
224 qh->reg_pop = &inst->qmgr->reg_pop[id]; in __knav_queue_open()
225 qh->reg_peek = &inst->qmgr->reg_peek[id]; in __knav_queue_open()
228 if (!knav_queue_is_busy(inst)) { in __knav_queue_open()
229 struct knav_range_info *range = inst->range; in __knav_queue_open()
231 inst->name = kstrndup(name, KNAV_NAME_SIZE, GFP_KERNEL); in __knav_queue_open()
233 ret = range->ops->open_queue(range, inst, flags); in __knav_queue_open()
236 devm_kfree(inst->kdev->dev, qh); in __knav_queue_open()
240 list_add_tail_rcu(&qh->list, &inst->handles); in __knav_queue_open()
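Lines 209-240 outline the core open path: allocate a handle, cache the instance's push/pop/peek register pointers, and, only for the first opener (the queue is not yet busy), name the instance and run the range's open_queue op, freeing the handle again if that fails; the handle is linked into inst->handles last, under RCU. Reconstructed as a sketch, with the error-path labels the listing hides replaced by direct returns:

static struct knav_queue *__knav_queue_open(struct knav_queue_inst *inst,
					    const char *name, unsigned flags)
{
	struct knav_queue *qh;
	unsigned id;
	int ret = 0;

	qh = devm_kzalloc(inst->kdev->dev, sizeof(*qh), GFP_KERNEL);
	if (!qh)
		return ERR_PTR(-ENOMEM);

	qh->inst = inst;
	id = inst->id - inst->qmgr->start_queue;
	qh->reg_push = &inst->qmgr->reg_push[id];
	qh->reg_pop = &inst->qmgr->reg_pop[id];
	qh->reg_peek = &inst->qmgr->reg_peek[id];

	/* first opener names the queue and brings the range online */
	if (!knav_queue_is_busy(inst)) {
		struct knav_range_info *range = inst->range;

		inst->name = kstrndup(name, KNAV_NAME_SIZE, GFP_KERNEL);
		if (range->ops && range->ops->open_queue)
			ret = range->ops->open_queue(range, inst, flags);
		if (ret) {
			devm_kfree(inst->kdev->dev, qh);
			return ERR_PTR(ret);
		}
	}
	list_add_tail_rcu(&qh->list, &inst->handles);
	return qh;
}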
247 struct knav_queue_inst *inst; in knav_queue_open_by_id() local
253 inst = knav_queue_find_by_id(id); in knav_queue_open_by_id()
254 if (!inst) in knav_queue_open_by_id()
258 if (!(flags & KNAV_QUEUE_SHARED) && knav_queue_is_busy(inst)) in knav_queue_open_by_id()
263 (knav_queue_is_busy(inst) && !knav_queue_is_shared(inst))) in knav_queue_open_by_id()
266 qh = __knav_queue_open(inst, name, flags); in knav_queue_open_by_id()
277 struct knav_queue_inst *inst; in knav_queue_open_by_type() local
283 for_each_instance(idx, inst, kdev) { in knav_queue_open_by_type()
284 if (knav_queue_is_reserved(inst)) in knav_queue_open_by_type()
286 if (!knav_queue_match_type(inst, type)) in knav_queue_open_by_type()
288 if (knav_queue_is_busy(inst)) in knav_queue_open_by_type()
290 qh = __knav_queue_open(inst, name, flags); in knav_queue_open_by_type()
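Both helpers above back the driver's exported entry point. A hedged usage example; knav_queue_open() and the special type ids are declared in include/linux/soc/ti/knav_qmss.h, but treat the exact constants here as assumptions:

#include <linux/err.h>
#include <linux/soc/ti/knav_qmss.h>

static void *open_gp_queue(void)
{
	/* KNAV_QUEUE_GP asks for any free general-purpose queue; a
	 * concrete queue id would route to knav_queue_open_by_id()
	 * instead, and KNAV_QUEUE_SHARED would allow multiple openers */
	void *qh = knav_queue_open("my-gp-queue", KNAV_QUEUE_GP, 0);

	if (IS_ERR(qh))
		return NULL;
	return qh;
}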
299 static void knav_queue_set_notify(struct knav_queue_inst *inst, bool enabled) in knav_queue_set_notify() argument
301 struct knav_range_info *range = inst->range; in knav_queue_set_notify()
304 range->ops->set_notify(range, inst, enabled); in knav_queue_set_notify()
309 struct knav_queue_inst *inst = qh->inst; in knav_queue_enable_notifier() local
321 first = (atomic_inc_return(&inst->num_notifiers) == 1); in knav_queue_enable_notifier()
323 knav_queue_set_notify(inst, true); in knav_queue_enable_notifier()
330 struct knav_queue_inst *inst = qh->inst; in knav_queue_disable_notifier() local
337 last = (atomic_dec_return(&inst->num_notifiers) == 0); in knav_queue_disable_notifier()
339 knav_queue_set_notify(inst, false); in knav_queue_disable_notifier()
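Lines 299-339 show the enable/disable pair sharing one per-instance counter, so the range's set_notify op fires only on the 0-to-1 and 1-to-0 transitions. The enable half, sketched:

static int knav_queue_enable_notifier(struct knav_queue *qh)
{
	struct knav_queue_inst *inst = qh->inst;
	bool first;

	/* arm the hardware only for the first enabled handle */
	first = (atomic_inc_return(&inst->num_notifiers) == 1);
	if (first)
		knav_queue_set_notify(inst, true);
	return 0;
}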
352 if (!(qh->inst->range->flags & (RANGE_HAS_ACCUMULATOR | RANGE_HAS_IRQ))) in knav_queue_set_notifier()
368 struct knav_queue_inst *inst, in knav_gp_set_notify() argument
374 queue = inst->id - range->queue_base; in knav_gp_set_notify()
384 struct knav_queue_inst *inst, unsigned flags) in knav_gp_open_queue() argument
386 return knav_queue_setup_irq(range, inst); in knav_gp_open_queue()
390 struct knav_queue_inst *inst) in knav_gp_close_queue() argument
392 knav_queue_free_irq(inst); in knav_gp_close_queue()
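The knav_gp_* trio at lines 368-392 is the general-purpose range's implementation of the hooks that knav_queue_set_notify() and __knav_queue_open() call through. Presumably they are collected into an ops table along these lines (struct knav_range_ops lives in the driver's internal knav_qmss.h header; the initializer shown is an assumption):

static struct knav_range_ops knav_gp_range_ops = {
	.set_notify	= knav_gp_set_notify,
	.open_queue	= knav_gp_open_queue,
	.close_queue	= knav_gp_close_queue,
};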
406 struct knav_queue_inst *inst = qh->inst; in knav_queue_get_count() local
409 atomic_read(&inst->desc_count); in knav_queue_get_count()
413 struct knav_queue_inst *inst) in knav_queue_debug_show_instance() argument
415 struct knav_device *kdev = inst->kdev; in knav_queue_debug_show_instance()
418 if (!knav_queue_is_busy(inst)) in knav_queue_debug_show_instance()
422 kdev->base_id + inst->id, inst->name); in knav_queue_debug_show_instance()
423 for_each_handle_rcu(qh, inst) { in knav_queue_debug_show_instance()
442 struct knav_queue_inst *inst; in knav_queue_debug_show() local
449 for_each_instance(idx, inst, kdev) in knav_queue_debug_show()
450 knav_queue_debug_show_instance(s, inst); in knav_queue_debug_show()
489 struct knav_queue_inst *inst = qh->inst; in knav_queue_flush() local
490 unsigned id = inst->id - inst->qmgr->start_queue; in knav_queue_flush()
492 atomic_set(&inst->desc_count, 0); in knav_queue_flush()
493 writel_relaxed(0, &inst->qmgr->reg_push[id].ptr_size_thresh); in knav_queue_flush()
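Lines 489-493 make the flush semantics concrete: reset the software count first, then write 0 to the push register's ptr_size_thresh to drain the hardware side. Filled out as a complete function:

static int knav_queue_flush(struct knav_queue *qh)
{
	struct knav_queue_inst *inst = qh->inst;
	unsigned id = inst->id - inst->qmgr->start_queue;

	atomic_set(&inst->desc_count, 0);
	/* a zero write empties the hardware queue */
	writel_relaxed(0, &inst->qmgr->reg_push[id].ptr_size_thresh);
	return 0;
}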
538 struct knav_queue_inst *inst = qh->inst; in knav_queue_close() local
547 if (!knav_queue_is_busy(inst)) { in knav_queue_close()
548 struct knav_range_info *range = inst->range; in knav_queue_close()
551 range->ops->close_queue(range, inst); in knav_queue_close()
553 devm_kfree(inst->kdev->dev, qh); in knav_queue_close()
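The close path at lines 538-553 mirrors the open path: the handle is unlinked, and only once the last handle is gone does the range's close_queue op run, before the handle memory is returned. A sketch, with the unlink/synchronize steps (which the listing elides) marked as assumptions:

static int knav_queue_close(void *qhandle)
{
	struct knav_queue *qh = qhandle;
	struct knav_queue_inst *inst = qh->inst;

	list_del_rcu(&qh->list);	/* unlink step assumed */
	synchronize_rcu();

	/* last close shuts the range-specific side down */
	if (!knav_queue_is_busy(inst)) {
		struct knav_range_info *range = inst->range;

		if (range->ops && range->ops->close_queue)
			range->ops->close_queue(range, inst);
	}
	devm_kfree(inst->kdev->dev, qh);
	return 0;
}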
574 ret = qh->inst->kdev->base_id + qh->inst->id; in knav_queue_device_control()
641 struct knav_queue_inst *inst = qh->inst; in knav_queue_pop() local
646 if (inst->descs) { in knav_queue_pop()
647 if (unlikely(atomic_dec_return(&inst->desc_count) < 0)) { in knav_queue_pop()
648 atomic_inc(&inst->desc_count); in knav_queue_pop()
651 idx = atomic_inc_return(&inst->desc_head); in knav_queue_pop()
653 val = inst->descs[idx]; in knav_queue_pop()
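Lines 641-653 expose the accumulator fast path inside knav_queue_pop(): when the instance owns a software descriptor ring (inst->descs), desc_count is decremented optimistically and rolled back if the ring turns out to be empty, and desc_head picks the slot. Isolated into a hypothetical helper (the ring-size mask is an assumption; the listing elides it):

static inline dma_addr_t knav_queue_pop_ring(struct knav_queue_inst *inst)
{
	unsigned idx;

	/* claim one descriptor; undo the claim if the ring was empty */
	if (unlikely(atomic_dec_return(&inst->desc_count) < 0)) {
		atomic_inc(&inst->desc_count);
		return 0;
	}
	idx = atomic_inc_return(&inst->desc_head);
	idx &= ACC_DESCS_MASK;		/* mask name assumed */
	return inst->descs[idx];
}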
1614 struct knav_queue_inst *inst, in knav_queue_init_queue() argument
1618 inst->qmgr = knav_find_qmgr(id); in knav_queue_init_queue()
1619 if (!inst->qmgr) in knav_queue_init_queue()
1622 INIT_LIST_HEAD(&inst->handles); in knav_queue_init_queue()
1623 inst->kdev = kdev; in knav_queue_init_queue()
1624 inst->range = range; in knav_queue_init_queue()
1625 inst->irq_num = -1; in knav_queue_init_queue()
1626 inst->id = id; in knav_queue_init_queue()
1628 inst->irq_name = kstrndup(irq_name, sizeof(irq_name), GFP_KERNEL); in knav_queue_init_queue()
1631 return range->ops->init_queue(range, inst); in knav_queue_init_queue()
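Finally, lines 1614-1631 show per-queue initialization: resolve the owning queue manager, reset the bookkeeping fields (irq_num of -1 meaning "no IRQ yet"), duplicate a stack-built IRQ name, and hand off to the range's init_queue op. Filled out as a sketch (the irq_name formatting is an assumption):

static int knav_queue_init_queue(struct knav_device *kdev,
				 struct knav_range_info *range,
				 struct knav_queue_inst *inst,
				 unsigned id)
{
	char irq_name[KNAV_NAME_SIZE];

	inst->qmgr = knav_find_qmgr(id);
	if (!inst->qmgr)
		return -1;

	INIT_LIST_HEAD(&inst->handles);
	inst->kdev = kdev;
	inst->range = range;
	inst->irq_num = -1;
	inst->id = id;
	snprintf(irq_name, sizeof(irq_name), "hwqueue-%d", id);
	inst->irq_name = kstrndup(irq_name, sizeof(irq_name), GFP_KERNEL);

	if (range->ops && range->ops->init_queue)
		return range->ops->init_queue(range, inst);
	return 0;
}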