Lines Matching refs:desc

46 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); in irq_set_chip() local
48 if (!desc) in irq_set_chip()
54 desc->irq_data.chip = chip; in irq_set_chip()
55 irq_put_desc_unlock(desc, flags); in irq_set_chip()
73 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); in irq_set_irq_type() local
76 if (!desc) in irq_set_irq_type()
80 ret = __irq_set_trigger(desc, type); in irq_set_irq_type()
81 irq_put_desc_busunlock(desc, flags); in irq_set_irq_type()
96 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); in irq_set_handler_data() local
98 if (!desc) in irq_set_handler_data()
100 desc->irq_common_data.handler_data = data; in irq_set_handler_data()
101 irq_put_desc_unlock(desc, flags); in irq_set_handler_data()
118 struct irq_desc *desc = irq_get_desc_lock(irq_base + irq_offset, &flags, IRQ_GET_DESC_CHECK_GLOBAL); in irq_set_msi_desc_off() local
120 if (!desc) in irq_set_msi_desc_off()
122 desc->irq_common_data.msi_desc = entry; in irq_set_msi_desc_off()
125 irq_put_desc_unlock(desc, flags); in irq_set_msi_desc_off()
151 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); in irq_set_chip_data() local
153 if (!desc) in irq_set_chip_data()
155 desc->irq_data.chip_data = data; in irq_set_chip_data()
156 irq_put_desc_unlock(desc, flags); in irq_set_chip_data()
163 struct irq_desc *desc = irq_to_desc(irq); in irq_get_irq_data() local
165 return desc ? &desc->irq_data : NULL; in irq_get_irq_data()
169 static void irq_state_clr_disabled(struct irq_desc *desc) in irq_state_clr_disabled() argument
171 irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED); in irq_state_clr_disabled()
174 static void irq_state_set_disabled(struct irq_desc *desc) in irq_state_set_disabled() argument
176 irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED); in irq_state_set_disabled()
179 static void irq_state_clr_masked(struct irq_desc *desc) in irq_state_clr_masked() argument
181 irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED); in irq_state_clr_masked()
184 static void irq_state_set_masked(struct irq_desc *desc) in irq_state_set_masked() argument
186 irqd_set(&desc->irq_data, IRQD_IRQ_MASKED); in irq_state_set_masked()
189 int irq_startup(struct irq_desc *desc, bool resend) in irq_startup() argument
193 irq_state_clr_disabled(desc); in irq_startup()
194 desc->depth = 0; in irq_startup()
196 irq_domain_activate_irq(&desc->irq_data); in irq_startup()
197 if (desc->irq_data.chip->irq_startup) { in irq_startup()
198 ret = desc->irq_data.chip->irq_startup(&desc->irq_data); in irq_startup()
199 irq_state_clr_masked(desc); in irq_startup()
201 irq_enable(desc); in irq_startup()
204 check_irq_resend(desc); in irq_startup()
208 void irq_shutdown(struct irq_desc *desc) in irq_shutdown() argument
210 irq_state_set_disabled(desc); in irq_shutdown()
211 desc->depth = 1; in irq_shutdown()
212 if (desc->irq_data.chip->irq_shutdown) in irq_shutdown()
213 desc->irq_data.chip->irq_shutdown(&desc->irq_data); in irq_shutdown()
214 else if (desc->irq_data.chip->irq_disable) in irq_shutdown()
215 desc->irq_data.chip->irq_disable(&desc->irq_data); in irq_shutdown()
217 desc->irq_data.chip->irq_mask(&desc->irq_data); in irq_shutdown()
218 irq_domain_deactivate_irq(&desc->irq_data); in irq_shutdown()
219 irq_state_set_masked(desc); in irq_shutdown()
222 void irq_enable(struct irq_desc *desc) in irq_enable() argument
224 irq_state_clr_disabled(desc); in irq_enable()
225 if (desc->irq_data.chip->irq_enable) in irq_enable()
226 desc->irq_data.chip->irq_enable(&desc->irq_data); in irq_enable()
228 desc->irq_data.chip->irq_unmask(&desc->irq_data); in irq_enable()
229 irq_state_clr_masked(desc); in irq_enable()
252 void irq_disable(struct irq_desc *desc) in irq_disable() argument
254 irq_state_set_disabled(desc); in irq_disable()
255 if (desc->irq_data.chip->irq_disable) { in irq_disable()
256 desc->irq_data.chip->irq_disable(&desc->irq_data); in irq_disable()
257 irq_state_set_masked(desc); in irq_disable()
258 } else if (irq_settings_disable_unlazy(desc)) { in irq_disable()
259 mask_irq(desc); in irq_disable()
263 void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu) in irq_percpu_enable() argument
265 if (desc->irq_data.chip->irq_enable) in irq_percpu_enable()
266 desc->irq_data.chip->irq_enable(&desc->irq_data); in irq_percpu_enable()
268 desc->irq_data.chip->irq_unmask(&desc->irq_data); in irq_percpu_enable()
269 cpumask_set_cpu(cpu, desc->percpu_enabled); in irq_percpu_enable()
272 void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu) in irq_percpu_disable() argument
274 if (desc->irq_data.chip->irq_disable) in irq_percpu_disable()
275 desc->irq_data.chip->irq_disable(&desc->irq_data); in irq_percpu_disable()
277 desc->irq_data.chip->irq_mask(&desc->irq_data); in irq_percpu_disable()
278 cpumask_clear_cpu(cpu, desc->percpu_enabled); in irq_percpu_disable()
281 static inline void mask_ack_irq(struct irq_desc *desc) in mask_ack_irq() argument
283 if (desc->irq_data.chip->irq_mask_ack) in mask_ack_irq()
284 desc->irq_data.chip->irq_mask_ack(&desc->irq_data); in mask_ack_irq()
286 desc->irq_data.chip->irq_mask(&desc->irq_data); in mask_ack_irq()
287 if (desc->irq_data.chip->irq_ack) in mask_ack_irq()
288 desc->irq_data.chip->irq_ack(&desc->irq_data); in mask_ack_irq()
290 irq_state_set_masked(desc); in mask_ack_irq()
293 void mask_irq(struct irq_desc *desc) in mask_irq() argument
295 if (desc->irq_data.chip->irq_mask) { in mask_irq()
296 desc->irq_data.chip->irq_mask(&desc->irq_data); in mask_irq()
297 irq_state_set_masked(desc); in mask_irq()
301 void unmask_irq(struct irq_desc *desc) in unmask_irq() argument
303 if (desc->irq_data.chip->irq_unmask) { in unmask_irq()
304 desc->irq_data.chip->irq_unmask(&desc->irq_data); in unmask_irq()
305 irq_state_clr_masked(desc); in unmask_irq()
309 void unmask_threaded_irq(struct irq_desc *desc) in unmask_threaded_irq() argument
311 struct irq_chip *chip = desc->irq_data.chip; in unmask_threaded_irq()
314 chip->irq_eoi(&desc->irq_data); in unmask_threaded_irq()
317 chip->irq_unmask(&desc->irq_data); in unmask_threaded_irq()
318 irq_state_clr_masked(desc); in unmask_threaded_irq()
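irq_enable(), irq_disable(), mask_irq(), unmask_irq() and mask_ack_irq() above fall back to a small set of chip callbacks when the optional ones (.irq_enable, .irq_disable, .irq_mask_ack) are absent. A hedged sketch of a minimal irq_chip that relies on those fallbacks; the register layout and my_* names are hypothetical:

        #include <linux/bitops.h>
        #include <linux/io.h>
        #include <linux/irq.h>

        static void __iomem *my_regs;           /* hypothetical MMIO base */
        #define MY_MASK_REG     0x00
        #define MY_ACK_REG      0x04

        static void my_irq_mask(struct irq_data *d)
        {
                u32 bit = BIT(irqd_to_hwirq(d));

                writel(readl(my_regs + MY_MASK_REG) | bit, my_regs + MY_MASK_REG);
        }

        static void my_irq_unmask(struct irq_data *d)
        {
                u32 bit = BIT(irqd_to_hwirq(d));

                writel(readl(my_regs + MY_MASK_REG) & ~bit, my_regs + MY_MASK_REG);
        }

        static void my_irq_ack(struct irq_data *d)
        {
                writel(BIT(irqd_to_hwirq(d)), my_regs + MY_ACK_REG);
        }

        static struct irq_chip my_chip = {
                .name           = "my-chip",
                .irq_mask       = my_irq_mask,  /* used by mask_irq() */
                .irq_unmask     = my_irq_unmask,/* used by unmask_irq() and the irq_enable() fallback */
                .irq_ack        = my_irq_ack,   /* paired with .irq_mask by mask_ack_irq() when .irq_mask_ack is absent */
        };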
332 struct irq_desc *desc = irq_to_desc(irq); in handle_nested_irq() local
338 raw_spin_lock_irq(&desc->lock); in handle_nested_irq()
340 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); in handle_nested_irq()
341 kstat_incr_irqs_this_cpu(desc); in handle_nested_irq()
343 action = desc->action; in handle_nested_irq()
344 if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) { in handle_nested_irq()
345 desc->istate |= IRQS_PENDING; in handle_nested_irq()
349 irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS); in handle_nested_irq()
350 raw_spin_unlock_irq(&desc->lock); in handle_nested_irq()
354 note_interrupt(desc, action_ret); in handle_nested_irq()
356 raw_spin_lock_irq(&desc->lock); in handle_nested_irq()
357 irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS); in handle_nested_irq()
360 raw_spin_unlock_irq(&desc->lock); in handle_nested_irq()
364 static bool irq_check_poll(struct irq_desc *desc) in irq_check_poll() argument
366 if (!(desc->istate & IRQS_POLL_INPROGRESS)) in irq_check_poll()
368 return irq_wait_for_poll(desc); in irq_check_poll()
371 static bool irq_may_run(struct irq_desc *desc) in irq_may_run() argument
379 if (!irqd_has_set(&desc->irq_data, mask)) in irq_may_run()
387 if (irq_pm_check_wakeup(desc)) in irq_may_run()
393 return irq_check_poll(desc); in irq_may_run()
407 void handle_simple_irq(struct irq_desc *desc) in handle_simple_irq() argument
409 raw_spin_lock(&desc->lock); in handle_simple_irq()
411 if (!irq_may_run(desc)) in handle_simple_irq()
414 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); in handle_simple_irq()
415 kstat_incr_irqs_this_cpu(desc); in handle_simple_irq()
417 if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { in handle_simple_irq()
418 desc->istate |= IRQS_PENDING; in handle_simple_irq()
422 handle_irq_event(desc); in handle_simple_irq()
425 raw_spin_unlock(&desc->lock); in handle_simple_irq()
433 static void cond_unmask_irq(struct irq_desc *desc) in cond_unmask_irq() argument
442 if (!irqd_irq_disabled(&desc->irq_data) && in cond_unmask_irq()
443 irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) in cond_unmask_irq()
444 unmask_irq(desc); in cond_unmask_irq()
456 void handle_level_irq(struct irq_desc *desc) in handle_level_irq() argument
458 raw_spin_lock(&desc->lock); in handle_level_irq()
459 mask_ack_irq(desc); in handle_level_irq()
461 if (!irq_may_run(desc)) in handle_level_irq()
464 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); in handle_level_irq()
465 kstat_incr_irqs_this_cpu(desc); in handle_level_irq()
471 if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { in handle_level_irq()
472 desc->istate |= IRQS_PENDING; in handle_level_irq()
476 handle_irq_event(desc); in handle_level_irq()
478 cond_unmask_irq(desc); in handle_level_irq()
481 raw_spin_unlock(&desc->lock); in handle_level_irq()
486 static inline void preflow_handler(struct irq_desc *desc) in preflow_handler() argument
488 if (desc->preflow_handler) in preflow_handler()
489 desc->preflow_handler(&desc->irq_data); in preflow_handler()
492 static inline void preflow_handler(struct irq_desc *desc) { } in preflow_handler() argument
495 static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip) in cond_unmask_eoi_irq() argument
497 if (!(desc->istate & IRQS_ONESHOT)) { in cond_unmask_eoi_irq()
498 chip->irq_eoi(&desc->irq_data); in cond_unmask_eoi_irq()
507 if (!irqd_irq_disabled(&desc->irq_data) && in cond_unmask_eoi_irq()
508 irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) { in cond_unmask_eoi_irq()
509 chip->irq_eoi(&desc->irq_data); in cond_unmask_eoi_irq()
510 unmask_irq(desc); in cond_unmask_eoi_irq()
512 chip->irq_eoi(&desc->irq_data); in cond_unmask_eoi_irq()
525 void handle_fasteoi_irq(struct irq_desc *desc) in handle_fasteoi_irq() argument
527 struct irq_chip *chip = desc->irq_data.chip; in handle_fasteoi_irq()
529 raw_spin_lock(&desc->lock); in handle_fasteoi_irq()
531 if (!irq_may_run(desc)) in handle_fasteoi_irq()
534 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); in handle_fasteoi_irq()
535 kstat_incr_irqs_this_cpu(desc); in handle_fasteoi_irq()
541 if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { in handle_fasteoi_irq()
542 desc->istate |= IRQS_PENDING; in handle_fasteoi_irq()
543 mask_irq(desc); in handle_fasteoi_irq()
547 if (desc->istate & IRQS_ONESHOT) in handle_fasteoi_irq()
548 mask_irq(desc); in handle_fasteoi_irq()
550 preflow_handler(desc); in handle_fasteoi_irq()
551 handle_irq_event(desc); in handle_fasteoi_irq()
553 cond_unmask_eoi_irq(desc, chip); in handle_fasteoi_irq()
555 raw_spin_unlock(&desc->lock); in handle_fasteoi_irq()
559 chip->irq_eoi(&desc->irq_data); in handle_fasteoi_irq()
560 raw_spin_unlock(&desc->lock); in handle_fasteoi_irq()
579 void handle_edge_irq(struct irq_desc *desc) in handle_edge_irq() argument
581 raw_spin_lock(&desc->lock); in handle_edge_irq()
583 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); in handle_edge_irq()
585 if (!irq_may_run(desc)) { in handle_edge_irq()
586 desc->istate |= IRQS_PENDING; in handle_edge_irq()
587 mask_ack_irq(desc); in handle_edge_irq()
595 if (irqd_irq_disabled(&desc->irq_data) || !desc->action) { in handle_edge_irq()
596 desc->istate |= IRQS_PENDING; in handle_edge_irq()
597 mask_ack_irq(desc); in handle_edge_irq()
601 kstat_incr_irqs_this_cpu(desc); in handle_edge_irq()
604 desc->irq_data.chip->irq_ack(&desc->irq_data); in handle_edge_irq()
607 if (unlikely(!desc->action)) { in handle_edge_irq()
608 mask_irq(desc); in handle_edge_irq()
617 if (unlikely(desc->istate & IRQS_PENDING)) { in handle_edge_irq()
618 if (!irqd_irq_disabled(&desc->irq_data) && in handle_edge_irq()
619 irqd_irq_masked(&desc->irq_data)) in handle_edge_irq()
620 unmask_irq(desc); in handle_edge_irq()
623 handle_irq_event(desc); in handle_edge_irq()
625 } while ((desc->istate & IRQS_PENDING) && in handle_edge_irq()
626 !irqd_irq_disabled(&desc->irq_data)); in handle_edge_irq()
629 raw_spin_unlock(&desc->lock); in handle_edge_irq()
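handle_level_irq() masks and acks before running the action and conditionally unmasks afterwards, while handle_edge_irq() acks first and loops as long as IRQS_PENDING is set and the line is not disabled. A hedged sketch of how an irqchip driver commonly picks between these flow handlers when mapping a hwirq; the domain callback and my_* names are hypothetical:

        #include <linux/irq.h>
        #include <linux/irqdomain.h>

        static struct irq_chip my_chip;         /* hypothetical chip, e.g. as sketched earlier */

        static int my_domain_map(struct irq_domain *d, unsigned int virq,
                                 irq_hw_number_t hwirq)
        {
                if (hwirq < 8)  /* hypothetical: first 8 lines are level triggered */
                        irq_set_chip_and_handler(virq, &my_chip, handle_level_irq);
                else            /* the rest are edge triggered */
                        irq_set_chip_and_handler(virq, &my_chip, handle_edge_irq);

                irq_set_chip_data(virq, d->host_data);
                return 0;
        }

        static const struct irq_domain_ops my_domain_ops = {
                .map    = my_domain_map,
                .xlate  = irq_domain_xlate_onetwocell,
        };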
641 void handle_edge_eoi_irq(struct irq_desc *desc) in handle_edge_eoi_irq() argument
643 struct irq_chip *chip = irq_desc_get_chip(desc); in handle_edge_eoi_irq()
645 raw_spin_lock(&desc->lock); in handle_edge_eoi_irq()
647 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); in handle_edge_eoi_irq()
649 if (!irq_may_run(desc)) { in handle_edge_eoi_irq()
650 desc->istate |= IRQS_PENDING; in handle_edge_eoi_irq()
658 if (irqd_irq_disabled(&desc->irq_data) || !desc->action) { in handle_edge_eoi_irq()
659 desc->istate |= IRQS_PENDING; in handle_edge_eoi_irq()
663 kstat_incr_irqs_this_cpu(desc); in handle_edge_eoi_irq()
666 if (unlikely(!desc->action)) in handle_edge_eoi_irq()
669 handle_irq_event(desc); in handle_edge_eoi_irq()
671 } while ((desc->istate & IRQS_PENDING) && in handle_edge_eoi_irq()
672 !irqd_irq_disabled(&desc->irq_data)); in handle_edge_eoi_irq()
675 chip->irq_eoi(&desc->irq_data); in handle_edge_eoi_irq()
676 raw_spin_unlock(&desc->lock); in handle_edge_eoi_irq()
686 void handle_percpu_irq(struct irq_desc *desc) in handle_percpu_irq() argument
688 struct irq_chip *chip = irq_desc_get_chip(desc); in handle_percpu_irq()
690 kstat_incr_irqs_this_cpu(desc); in handle_percpu_irq()
693 chip->irq_ack(&desc->irq_data); in handle_percpu_irq()
695 handle_irq_event_percpu(desc); in handle_percpu_irq()
698 chip->irq_eoi(&desc->irq_data); in handle_percpu_irq()
712 void handle_percpu_devid_irq(struct irq_desc *desc) in handle_percpu_devid_irq() argument
714 struct irq_chip *chip = irq_desc_get_chip(desc); in handle_percpu_devid_irq()
715 struct irqaction *action = desc->action; in handle_percpu_devid_irq()
717 unsigned int irq = irq_desc_get_irq(desc); in handle_percpu_devid_irq()
720 kstat_incr_irqs_this_cpu(desc); in handle_percpu_devid_irq()
723 chip->irq_ack(&desc->irq_data); in handle_percpu_devid_irq()
730 chip->irq_eoi(&desc->irq_data); in handle_percpu_devid_irq()
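handle_percpu_devid_irq() hands each CPU its own dev_id, and irq_percpu_enable()/irq_percpu_disable() above maintain the per-CPU enabled mask. A hedged sketch of the usual driver-side setup for such an interrupt, assuming the irqchip has already installed handle_percpu_devid_irq for the line; all my_* names are hypothetical:

        #include <linux/interrupt.h>
        #include <linux/irq.h>
        #include <linux/percpu.h>

        static int __percpu *my_timer_evt;      /* hypothetical per-CPU cookie */

        static irqreturn_t my_timer_isr(int irq, void *dev_id)
        {
                /* dev_id is this CPU's instance of my_timer_evt */
                return IRQ_HANDLED;
        }

        static int my_timer_setup(unsigned int virq)
        {
                int ret;

                my_timer_evt = alloc_percpu(int);
                if (!my_timer_evt)
                        return -ENOMEM;

                /* marks the descriptor IRQ_PER_CPU_DEVID, IRQ_NOAUTOEN, ... */
                irq_set_percpu_devid(virq);
                ret = request_percpu_irq(virq, my_timer_isr, "my-timer", my_timer_evt);
                if (ret)
                        return ret;

                /* called on each CPU, e.g. from a hotplug callback; lands in irq_percpu_enable() */
                enable_percpu_irq(virq, IRQ_TYPE_NONE);
                return 0;
        }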
734 __irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle, in __irq_do_set_handler() argument
740 struct irq_data *irq_data = &desc->irq_data; in __irq_do_set_handler()
769 if (desc->irq_data.chip != &no_irq_chip) in __irq_do_set_handler()
770 mask_ack_irq(desc); in __irq_do_set_handler()
771 irq_state_set_disabled(desc); in __irq_do_set_handler()
773 desc->action = NULL; in __irq_do_set_handler()
774 desc->depth = 1; in __irq_do_set_handler()
776 desc->handle_irq = handle; in __irq_do_set_handler()
777 desc->name = name; in __irq_do_set_handler()
780 irq_settings_set_noprobe(desc); in __irq_do_set_handler()
781 irq_settings_set_norequest(desc); in __irq_do_set_handler()
782 irq_settings_set_nothread(desc); in __irq_do_set_handler()
783 desc->action = &chained_action; in __irq_do_set_handler()
784 irq_startup(desc, true); in __irq_do_set_handler()
793 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0); in __irq_set_handler() local
795 if (!desc) in __irq_set_handler()
798 __irq_do_set_handler(desc, handle, is_chained, name); in __irq_set_handler()
799 irq_put_desc_busunlock(desc, flags); in __irq_set_handler()
808 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0); in irq_set_chained_handler_and_data() local
810 if (!desc) in irq_set_chained_handler_and_data()
813 __irq_do_set_handler(desc, handle, 1, NULL); in irq_set_chained_handler_and_data()
814 desc->irq_common_data.handler_data = data; in irq_set_chained_handler_and_data()
816 irq_put_desc_busunlock(desc, flags); in irq_set_chained_handler_and_data()
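__irq_do_set_handler() with is_chained set marks the descriptor noprobe/norequest/nothread, installs chained_action and starts the line up, which is what irq_set_chained_handler_and_data() relies on. A hedged sketch of a demultiplexing handler installed that way; the my_gpio_* names, register and domain are hypothetical:

        #include <linux/bitops.h>
        #include <linux/io.h>
        #include <linux/irq.h>
        #include <linux/irqchip/chained_irq.h>
        #include <linux/irqdomain.h>

        struct my_gpio {
                void __iomem *regs;
                struct irq_domain *domain;
        };
        #define MY_PENDING_REG  0x08

        static void my_gpio_demux(struct irq_desc *desc)
        {
                struct my_gpio *gc = irq_desc_get_handler_data(desc);
                struct irq_chip *chip = irq_desc_get_chip(desc);
                unsigned long pending;
                unsigned int bit;

                chained_irq_enter(chip, desc);          /* ack/mask the parent line */
                pending = readl(gc->regs + MY_PENDING_REG);
                for_each_set_bit(bit, &pending, 32)
                        generic_handle_irq(irq_find_mapping(gc->domain, bit));
                chained_irq_exit(chip, desc);           /* eoi/unmask the parent line */
        }

        /* probe-time hookup: parent_irq comes from platform_get_irq() or similar */
        static void my_gpio_hook_parent(struct my_gpio *gc, unsigned int parent_irq)
        {
                irq_set_chained_handler_and_data(parent_irq, my_gpio_demux, gc);
        }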
832 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); in irq_modify_status() local
834 if (!desc) in irq_modify_status()
836 irq_settings_clr_and_set(desc, clr, set); in irq_modify_status()
838 irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU | in irq_modify_status()
840 if (irq_settings_has_no_balance_set(desc)) in irq_modify_status()
841 irqd_set(&desc->irq_data, IRQD_NO_BALANCING); in irq_modify_status()
842 if (irq_settings_is_per_cpu(desc)) in irq_modify_status()
843 irqd_set(&desc->irq_data, IRQD_PER_CPU); in irq_modify_status()
844 if (irq_settings_can_move_pcntxt(desc)) in irq_modify_status()
845 irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT); in irq_modify_status()
846 if (irq_settings_is_level(desc)) in irq_modify_status()
847 irqd_set(&desc->irq_data, IRQD_LEVEL); in irq_modify_status()
849 irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc)); in irq_modify_status()
851 irq_put_desc_unlock(desc, flags); in irq_modify_status()
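irq_modify_status() clears and sets irq_settings bits and then re-derives the IRQD_* flags shown above. Most callers reach it through the irq_set_status_flags()/irq_clear_status_flags() wrappers, which pass 0 for the other mask. A short hedged sketch; virq and the helper are hypothetical:

        #include <linux/irq.h>

        static void my_mark_percpu_irq(unsigned int virq)
        {
                /* equivalent to irq_modify_status(virq, 0, IRQ_NOAUTOEN | IRQ_PER_CPU | IRQ_NOPROBE) */
                irq_set_status_flags(virq, IRQ_NOAUTOEN | IRQ_PER_CPU | IRQ_NOPROBE);

                /* later, e.g. on teardown: irq_modify_status(virq, IRQ_NOAUTOEN, 0) */
                irq_clear_status_flags(virq, IRQ_NOAUTOEN);
        }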
863 struct irq_desc *desc; in irq_cpu_online() local
869 desc = irq_to_desc(irq); in irq_cpu_online()
870 if (!desc) in irq_cpu_online()
873 raw_spin_lock_irqsave(&desc->lock, flags); in irq_cpu_online()
875 chip = irq_data_get_irq_chip(&desc->irq_data); in irq_cpu_online()
878 !irqd_irq_disabled(&desc->irq_data))) in irq_cpu_online()
879 chip->irq_cpu_online(&desc->irq_data); in irq_cpu_online()
881 raw_spin_unlock_irqrestore(&desc->lock, flags); in irq_cpu_online()
893 struct irq_desc *desc; in irq_cpu_offline() local
899 desc = irq_to_desc(irq); in irq_cpu_offline()
900 if (!desc) in irq_cpu_offline()
903 raw_spin_lock_irqsave(&desc->lock, flags); in irq_cpu_offline()
905 chip = irq_data_get_irq_chip(&desc->irq_data); in irq_cpu_offline()
908 !irqd_irq_disabled(&desc->irq_data))) in irq_cpu_offline()
909 chip->irq_cpu_offline(&desc->irq_data); in irq_cpu_offline()
911 raw_spin_unlock_irqrestore(&desc->lock, flags); in irq_cpu_offline()