Lines matching refs: desc (struct irq_desc) in the kernel's hardirq management code, kernel/irq/manage.c. Each entry shows the source line number, the matching line, and the enclosing function; declaration sites are additionally tagged "argument" or "local".

35 static void __synchronize_hardirq(struct irq_desc *desc)  in __synchronize_hardirq()  argument
46 while (irqd_irq_inprogress(&desc->irq_data)) in __synchronize_hardirq()
50 raw_spin_lock_irqsave(&desc->lock, flags); in __synchronize_hardirq()
51 inprogress = irqd_irq_inprogress(&desc->irq_data); in __synchronize_hardirq()
52 raw_spin_unlock_irqrestore(&desc->lock, flags); in __synchronize_hardirq()
77 struct irq_desc *desc = irq_to_desc(irq); in synchronize_hardirq() local
79 if (desc) { in synchronize_hardirq()
80 __synchronize_hardirq(desc); in synchronize_hardirq()
81 return !atomic_read(&desc->threads_active); in synchronize_hardirq()
100 struct irq_desc *desc = irq_to_desc(irq); in synchronize_irq() local
102 if (desc) { in synchronize_irq()
103 __synchronize_hardirq(desc); in synchronize_irq()
109 wait_event(desc->wait_for_threads, in synchronize_irq()
110 !atomic_read(&desc->threads_active)); in synchronize_irq()
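
Sketch (hypothetical driver, foo_* names are assumptions, not from this file): the classic use of this pair is quiescing an interrupt before freeing state its handler touches. synchronize_irq() covers both the hard handler and any threaded handler via the wait_for_threads queue shown above; synchronize_hardirq() only waits out the hard part and reports whether threads are still active.

    #include <linux/interrupt.h>

    struct foo_dev {
            int irq;
            void *ring;             /* state the handler dereferences */
    };

    static void foo_quiesce(struct foo_dev *fd)
    {
            disable_irq_nosync(fd->irq);    /* stop new invocations */
            synchronize_irq(fd->irq);       /* drain hard + threaded handlers */
            /* fd->ring can now be freed without racing the handler */
    }

Note that synchronize_irq() can sleep, so it must not be called from the handler itself or while holding a lock the handler takes.
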
118 static int __irq_can_set_affinity(struct irq_desc *desc) in __irq_can_set_affinity() argument
120 if (!desc || !irqd_can_balance(&desc->irq_data) || in __irq_can_set_affinity()
121 !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity) in __irq_can_set_affinity()
145 void irq_set_thread_affinity(struct irq_desc *desc) in irq_set_thread_affinity() argument
147 struct irqaction *action = desc->action; in irq_set_thread_affinity()
166 irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) in irq_copy_pending() argument
168 cpumask_copy(desc->pending_mask, mask); in irq_copy_pending()
171 irq_get_pending(struct cpumask *mask, struct irq_desc *desc) in irq_get_pending() argument
173 cpumask_copy(mask, desc->pending_mask); in irq_get_pending()
179 irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { } in irq_copy_pending() argument
181 irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { } in irq_get_pending() argument
187 struct irq_desc *desc = irq_data_to_desc(data); in irq_do_set_affinity() local
195 cpumask_copy(desc->irq_common_data.affinity, mask); in irq_do_set_affinity()
197 irq_set_thread_affinity(desc); in irq_do_set_affinity()
208 struct irq_desc *desc = irq_data_to_desc(data); in irq_set_affinity_locked() local
218 irq_copy_pending(desc, mask); in irq_set_affinity_locked()
221 if (desc->affinity_notify) { in irq_set_affinity_locked()
222 kref_get(&desc->affinity_notify->kref); in irq_set_affinity_locked()
223 schedule_work(&desc->affinity_notify->work); in irq_set_affinity_locked()
232 struct irq_desc *desc = irq_to_desc(irq); in __irq_set_affinity() local
236 if (!desc) in __irq_set_affinity()
239 raw_spin_lock_irqsave(&desc->lock, flags); in __irq_set_affinity()
240 ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force); in __irq_set_affinity()
241 raw_spin_unlock_irqrestore(&desc->lock, flags); in __irq_set_affinity()
248 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); in irq_set_affinity_hint() local
250 if (!desc) in irq_set_affinity_hint()
252 desc->affinity_hint = m; in irq_set_affinity_hint()
253 irq_put_desc_unlock(desc, flags); in irq_set_affinity_hint()
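
Sketch (hypothetical multi-queue driver): in the version listed here, irq_set_affinity_hint() only records the mask in desc->affinity_hint, which /proc/irq/<n>/affinity_hint exposes to userspace balancers; actually moving the line goes through the irq_set_affinity_locked() path above.

    #include <linux/interrupt.h>
    #include <linux/cpumask.h>

    static int foo_hint_queue(unsigned int irq, int cpu)
    {
            /* advertise the preferred CPU for this queue's vector */
            return irq_set_affinity_hint(irq, cpumask_of(cpu));
    }

    static void foo_drop_hint(unsigned int irq)
    {
            /* must be cleared before free_irq(), or __free_irq() warns */
            irq_set_affinity_hint(irq, NULL);
    }
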
265 struct irq_desc *desc = irq_to_desc(notify->irq); in irq_affinity_notify() local
269 if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL)) in irq_affinity_notify()
272 raw_spin_lock_irqsave(&desc->lock, flags); in irq_affinity_notify()
273 if (irq_move_pending(&desc->irq_data)) in irq_affinity_notify()
274 irq_get_pending(cpumask, desc); in irq_affinity_notify()
276 cpumask_copy(cpumask, desc->irq_common_data.affinity); in irq_affinity_notify()
277 raw_spin_unlock_irqrestore(&desc->lock, flags); in irq_affinity_notify()
300 struct irq_desc *desc = irq_to_desc(irq); in irq_set_affinity_notifier() local
307 if (!desc) in irq_set_affinity_notifier()
317 raw_spin_lock_irqsave(&desc->lock, flags); in irq_set_affinity_notifier()
318 old_notify = desc->affinity_notify; in irq_set_affinity_notifier()
319 desc->affinity_notify = notify; in irq_set_affinity_notifier()
320 raw_spin_unlock_irqrestore(&desc->lock, flags); in irq_set_affinity_notifier()
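
Sketch of the driver side of this handshake (hypothetical foo_dev): irq_set_affinity_notifier() initializes the kref and work item itself, so the caller only fills in the notify() and release() callbacks; notify() then runs from the work scheduled in irq_set_affinity_locked() above.

    #include <linux/interrupt.h>

    struct foo_dev {
            struct irq_affinity_notify an;
    };

    static void foo_affinity_changed(struct irq_affinity_notify *notify,
                                     const cpumask_t *mask)
    {
            /* re-steer per-queue state toward the new CPU set */
    }

    static void foo_affinity_release(struct kref *ref)
    {
            /* final kref_put: nothing dynamically allocated here */
    }

    static int foo_watch_affinity(struct foo_dev *fd, unsigned int irq)
    {
            fd->an.notify = foo_affinity_changed;
            fd->an.release = foo_affinity_release;
            return irq_set_affinity_notifier(irq, &fd->an);
    }
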
333 static int setup_affinity(struct irq_desc *desc, struct cpumask *mask) in setup_affinity() argument
336 int node = irq_desc_get_node(desc); in setup_affinity()
339 if (!__irq_can_set_affinity(desc)) in setup_affinity()
346 if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) { in setup_affinity()
347 if (cpumask_intersects(desc->irq_common_data.affinity, in setup_affinity()
349 set = desc->irq_common_data.affinity; in setup_affinity()
351 irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET); in setup_affinity()
362 irq_do_set_affinity(&desc->irq_data, mask, false); in setup_affinity()
378 struct irq_desc *desc = irq_to_desc(irq); in irq_select_affinity_usr() local
382 raw_spin_lock_irqsave(&desc->lock, flags); in irq_select_affinity_usr()
383 ret = setup_affinity(desc, mask); in irq_select_affinity_usr()
384 raw_spin_unlock_irqrestore(&desc->lock, flags); in irq_select_affinity_usr()
390 setup_affinity(struct irq_desc *desc, struct cpumask *mask) in setup_affinity() argument
409 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); in irq_set_vcpu_affinity() local
414 if (!desc) in irq_set_vcpu_affinity()
417 data = irq_desc_get_irq_data(desc); in irq_set_vcpu_affinity()
421 irq_put_desc_unlock(desc, flags); in irq_set_vcpu_affinity()
427 void __disable_irq(struct irq_desc *desc) in __disable_irq() argument
429 if (!desc->depth++) in __disable_irq()
430 irq_disable(desc); in __disable_irq()
436 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); in __disable_irq_nosync() local
438 if (!desc) in __disable_irq_nosync()
440 __disable_irq(desc); in __disable_irq_nosync()
441 irq_put_desc_busunlock(desc, flags); in __disable_irq_nosync()
507 void __enable_irq(struct irq_desc *desc) in __enable_irq() argument
509 switch (desc->depth) { in __enable_irq()
513 irq_desc_get_irq(desc)); in __enable_irq()
516 if (desc->istate & IRQS_SUSPENDED) in __enable_irq()
519 irq_settings_set_noprobe(desc); in __enable_irq()
520 irq_enable(desc); in __enable_irq()
521 check_irq_resend(desc); in __enable_irq()
525 desc->depth--; in __enable_irq()
543 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); in enable_irq() local
545 if (!desc) in enable_irq()
547 if (WARN(!desc->irq_data.chip, in enable_irq()
551 __enable_irq(desc); in enable_irq()
553 irq_put_desc_busunlock(desc, flags); in enable_irq()
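
Sketch of the depth counting visible in __disable_irq()/__enable_irq(): disables nest, the line is unmasked only when the count returns to zero, and an extra enable_irq() trips the "Unbalanced" warning in the switch above.

    #include <linux/interrupt.h>

    static void foo_nested_disable(unsigned int irq)
    {
            disable_irq(irq);       /* depth 0 -> 1, masked, waits for handlers */
            disable_irq(irq);       /* depth 1 -> 2, still masked */
            enable_irq(irq);        /* depth 2 -> 1, still masked */
            enable_irq(irq);        /* depth 1 -> 0, line live again */
    }
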
559 struct irq_desc *desc = irq_to_desc(irq); in set_irq_wake_real() local
562 if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE) in set_irq_wake_real()
565 if (desc->irq_data.chip->irq_set_wake) in set_irq_wake_real()
566 ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on); in set_irq_wake_real()
586 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); in irq_set_irq_wake() local
589 if (!desc) in irq_set_irq_wake()
596 if (desc->wake_depth++ == 0) { in irq_set_irq_wake()
599 desc->wake_depth = 0; in irq_set_irq_wake()
601 irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE); in irq_set_irq_wake()
604 if (desc->wake_depth == 0) { in irq_set_irq_wake()
606 } else if (--desc->wake_depth == 0) { in irq_set_irq_wake()
609 desc->wake_depth = 1; in irq_set_irq_wake()
611 irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE); in irq_set_irq_wake()
614 irq_put_desc_busunlock(desc, flags); in irq_set_irq_wake()
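
Sketch (hypothetical platform driver): wake_depth is reference counted the same way, and drivers normally reach irq_set_irq_wake() through the enable_irq_wake()/disable_irq_wake() wrappers in their suspend/resume callbacks.

    #include <linux/device.h>
    #include <linux/interrupt.h>

    struct foo_dev {
            int irq;
    };

    static int foo_suspend(struct device *dev)
    {
            struct foo_dev *fd = dev_get_drvdata(dev);

            if (device_may_wakeup(dev))
                    enable_irq_wake(fd->irq);       /* wake_depth 0 -> 1 */
            return 0;
    }

    static int foo_resume(struct device *dev)
    {
            struct foo_dev *fd = dev_get_drvdata(dev);

            if (device_may_wakeup(dev))
                    disable_irq_wake(fd->irq);      /* wake_depth 1 -> 0 */
            return 0;
    }
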
627 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); in can_request_irq() local
630 if (!desc) in can_request_irq()
633 if (irq_settings_can_request(desc)) { in can_request_irq()
634 if (!desc->action || in can_request_irq()
635 irqflags & desc->action->flags & IRQF_SHARED) in can_request_irq()
638 irq_put_desc_unlock(desc, flags); in can_request_irq()
642 int __irq_set_trigger(struct irq_desc *desc, unsigned long flags) in __irq_set_trigger() argument
644 struct irq_chip *chip = desc->irq_data.chip; in __irq_set_trigger()
653 irq_desc_get_irq(desc), in __irq_set_trigger()
661 if (!irqd_irq_masked(&desc->irq_data)) in __irq_set_trigger()
662 mask_irq(desc); in __irq_set_trigger()
663 if (!irqd_irq_disabled(&desc->irq_data)) in __irq_set_trigger()
668 ret = chip->irq_set_type(&desc->irq_data, flags); in __irq_set_trigger()
673 irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK); in __irq_set_trigger()
674 irqd_set(&desc->irq_data, flags); in __irq_set_trigger()
677 flags = irqd_get_trigger_type(&desc->irq_data); in __irq_set_trigger()
678 irq_settings_set_trigger_mask(desc, flags); in __irq_set_trigger()
679 irqd_clear(&desc->irq_data, IRQD_LEVEL); in __irq_set_trigger()
680 irq_settings_clr_level(desc); in __irq_set_trigger()
682 irq_settings_set_level(desc); in __irq_set_trigger()
683 irqd_set(&desc->irq_data, IRQD_LEVEL); in __irq_set_trigger()
690 flags, irq_desc_get_irq(desc), chip->irq_set_type); in __irq_set_trigger()
693 unmask_irq(desc); in __irq_set_trigger()
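
Sketch: callers reach __irq_set_trigger() either by passing IRQF_TRIGGER_* flags at request time or later through irq_set_irq_type(), which takes the descriptor lock and forwards to chip->irq_set_type() as above.

    #include <linux/irq.h>

    static int foo_set_rising_edge(unsigned int irq)
    {
            return irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
    }
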
701 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); in irq_set_parent() local
703 if (!desc) in irq_set_parent()
706 desc->parent_irq = parent_irq; in irq_set_parent()
708 irq_put_desc_unlock(desc, flags); in irq_set_parent()
762 static void irq_finalize_oneshot(struct irq_desc *desc, in irq_finalize_oneshot() argument
765 if (!(desc->istate & IRQS_ONESHOT) || in irq_finalize_oneshot()
769 chip_bus_lock(desc); in irq_finalize_oneshot()
770 raw_spin_lock_irq(&desc->lock); in irq_finalize_oneshot()
786 if (unlikely(irqd_irq_inprogress(&desc->irq_data))) { in irq_finalize_oneshot()
787 raw_spin_unlock_irq(&desc->lock); in irq_finalize_oneshot()
788 chip_bus_sync_unlock(desc); in irq_finalize_oneshot()
801 desc->threads_oneshot &= ~action->thread_mask; in irq_finalize_oneshot()
803 if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) && in irq_finalize_oneshot()
804 irqd_irq_masked(&desc->irq_data)) in irq_finalize_oneshot()
805 unmask_threaded_irq(desc); in irq_finalize_oneshot()
808 raw_spin_unlock_irq(&desc->lock); in irq_finalize_oneshot()
809 chip_bus_sync_unlock(desc); in irq_finalize_oneshot()
817 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) in irq_thread_check_affinity() argument
834 raw_spin_lock_irq(&desc->lock); in irq_thread_check_affinity()
839 if (desc->irq_common_data.affinity) in irq_thread_check_affinity()
840 cpumask_copy(mask, desc->irq_common_data.affinity); in irq_thread_check_affinity()
843 raw_spin_unlock_irq(&desc->lock); in irq_thread_check_affinity()
851 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { } in irq_thread_check_affinity() argument
861 irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action) in irq_forced_thread_fn() argument
867 irq_finalize_oneshot(desc, action); in irq_forced_thread_fn()
877 static irqreturn_t irq_thread_fn(struct irq_desc *desc, in irq_thread_fn() argument
883 irq_finalize_oneshot(desc, action); in irq_thread_fn()
887 static void wake_threads_waitq(struct irq_desc *desc) in wake_threads_waitq() argument
889 if (atomic_dec_and_test(&desc->threads_active)) in wake_threads_waitq()
890 wake_up(&desc->wait_for_threads); in wake_threads_waitq()
896 struct irq_desc *desc; in irq_thread_dtor() local
908 desc = irq_to_desc(action->irq); in irq_thread_dtor()
914 wake_threads_waitq(desc); in irq_thread_dtor()
917 irq_finalize_oneshot(desc, action); in irq_thread_dtor()
920 static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action) in irq_wake_secondary() argument
927 raw_spin_lock_irq(&desc->lock); in irq_wake_secondary()
928 __irq_wake_thread(desc, secondary); in irq_wake_secondary()
929 raw_spin_unlock_irq(&desc->lock); in irq_wake_secondary()
939 struct irq_desc *desc = irq_to_desc(action->irq); in irq_thread() local
940 irqreturn_t (*handler_fn)(struct irq_desc *desc, in irq_thread()
952 irq_thread_check_affinity(desc, action); in irq_thread()
957 irq_thread_check_affinity(desc, action); in irq_thread()
959 action_ret = handler_fn(desc, action); in irq_thread()
961 atomic_inc(&desc->threads_handled); in irq_thread()
963 irq_wake_secondary(desc, action); in irq_thread()
965 wake_threads_waitq(desc); in irq_thread()
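
Sketch of the handler pair irq_thread() ends up dispatching (hypothetical device): the hard handler does only the non-sleeping minimum and returns IRQ_WAKE_THREAD; the thread function may sleep, and for oneshot interrupts the core unmasks the line afterwards via irq_finalize_oneshot().

    #include <linux/interrupt.h>

    static irqreturn_t foo_hardirq(int irq, void *dev_id)
    {
            /* ack/mask at the device; no sleeping allowed here */
            return IRQ_WAKE_THREAD;
    }

    static irqreturn_t foo_thread_fn(int irq, void *dev_id)
    {
            /* may sleep: I2C transfers, mutexes, ... */
            return IRQ_HANDLED;
    }
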
989 struct irq_desc *desc = irq_to_desc(irq); in irq_wake_thread() local
993 if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc))) in irq_wake_thread()
996 raw_spin_lock_irqsave(&desc->lock, flags); in irq_wake_thread()
997 for (action = desc->action; action; action = action->next) { in irq_wake_thread()
1000 __irq_wake_thread(desc, action); in irq_wake_thread()
1004 raw_spin_unlock_irqrestore(&desc->lock, flags); in irq_wake_thread()
1040 static int irq_request_resources(struct irq_desc *desc) in irq_request_resources() argument
1042 struct irq_data *d = &desc->irq_data; in irq_request_resources()
1048 static void irq_release_resources(struct irq_desc *desc) in irq_release_resources() argument
1050 struct irq_data *d = &desc->irq_data; in irq_release_resources()
1104 __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) in __setup_irq() argument
1111 if (!desc) in __setup_irq()
1114 if (desc->irq_data.chip == &no_irq_chip) in __setup_irq()
1116 if (!try_module_get(desc->owner)) in __setup_irq()
1125 nested = irq_settings_is_nested_thread(desc); in __setup_irq()
1138 if (irq_settings_can_thread(desc)) { in __setup_irq()
1175 if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE) in __setup_irq()
1181 raw_spin_lock_irqsave(&desc->lock, flags); in __setup_irq()
1182 old_ptr = &desc->action; in __setup_irq()
1253 !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) { in __setup_irq()
1276 ret = irq_request_resources(desc); in __setup_irq()
1279 new->name, irq, desc->irq_data.chip->name); in __setup_irq()
1283 init_waitqueue_head(&desc->wait_for_threads); in __setup_irq()
1287 ret = __irq_set_trigger(desc, in __setup_irq()
1294 desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \ in __setup_irq()
1296 irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS); in __setup_irq()
1299 irqd_set(&desc->irq_data, IRQD_PER_CPU); in __setup_irq()
1300 irq_settings_set_per_cpu(desc); in __setup_irq()
1304 desc->istate |= IRQS_ONESHOT; in __setup_irq()
1306 if (irq_settings_can_autoenable(desc)) in __setup_irq()
1307 irq_startup(desc, true); in __setup_irq()
1310 desc->depth = 1; in __setup_irq()
1314 irq_settings_set_no_balancing(desc); in __setup_irq()
1315 irqd_set(&desc->irq_data, IRQD_NO_BALANCING); in __setup_irq()
1319 setup_affinity(desc, mask); in __setup_irq()
1323 unsigned int omsk = irq_settings_get_trigger_mask(desc); in __setup_irq()
1333 irq_pm_install_action(desc, new); in __setup_irq()
1336 desc->irq_count = 0; in __setup_irq()
1337 desc->irqs_unhandled = 0; in __setup_irq()
1343 if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) { in __setup_irq()
1344 desc->istate &= ~IRQS_SPURIOUS_DISABLED; in __setup_irq()
1345 __enable_irq(desc); in __setup_irq()
1348 raw_spin_unlock_irqrestore(&desc->lock, flags); in __setup_irq()
1359 register_irq_proc(irq, desc); in __setup_irq()
1377 raw_spin_unlock_irqrestore(&desc->lock, flags); in __setup_irq()
1396 module_put(desc->owner); in __setup_irq()
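
Sketch of a registration __setup_irq() will chain as shared (hypothetical names; foo_irq_is_mine() stands in for a device-specific "did we assert the line?" check): all sharers must pass IRQF_SHARED with compatible trigger settings, and each needs a unique, non-NULL dev_id so its action can be found again at free time.

    #include <linux/interrupt.h>

    struct foo_dev {
            int irq;
    };

    static bool foo_irq_is_mine(struct foo_dev *fd)
    {
            return false;   /* hypothetical status-register check */
    }

    static irqreturn_t foo_shared_handler(int irq, void *dev_id)
    {
            struct foo_dev *fd = dev_id;

            if (!foo_irq_is_mine(fd))
                    return IRQ_NONE;        /* let the next action look */
            /* ... service the device ... */
            return IRQ_HANDLED;
    }

    static int foo_attach(struct foo_dev *fd)
    {
            return request_irq(fd->irq, foo_shared_handler, IRQF_SHARED,
                               "foo", fd /* unique cookie, not NULL */);
    }
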
1410 struct irq_desc *desc = irq_to_desc(irq); in setup_irq() local
1412 if (WARN_ON(irq_settings_is_per_cpu_devid(desc))) in setup_irq()
1414 chip_bus_lock(desc); in setup_irq()
1415 retval = __setup_irq(irq, desc, act); in setup_irq()
1416 chip_bus_sync_unlock(desc); in setup_irq()
1428 struct irq_desc *desc = irq_to_desc(irq); in __free_irq() local
1434 if (!desc) in __free_irq()
1437 chip_bus_lock(desc); in __free_irq()
1438 raw_spin_lock_irqsave(&desc->lock, flags); in __free_irq()
1444 action_ptr = &desc->action; in __free_irq()
1450 raw_spin_unlock_irqrestore(&desc->lock, flags); in __free_irq()
1451 chip_bus_sync_unlock(desc); in __free_irq()
1463 irq_pm_remove_action(desc, action); in __free_irq()
1466 if (!desc->action) { in __free_irq()
1467 irq_settings_clr_disable_unlazy(desc); in __free_irq()
1468 irq_shutdown(desc); in __free_irq()
1469 irq_release_resources(desc); in __free_irq()
1474 if (WARN_ON_ONCE(desc->affinity_hint)) in __free_irq()
1475 desc->affinity_hint = NULL; in __free_irq()
1478 raw_spin_unlock_irqrestore(&desc->lock, flags); in __free_irq()
1479 chip_bus_sync_unlock(desc); in __free_irq()
1511 module_put(desc->owner); in __free_irq()
1525 struct irq_desc *desc = irq_to_desc(irq); in remove_irq() local
1527 if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc))) in remove_irq()
1548 struct irq_desc *desc = irq_to_desc(irq); in free_irq() local
1550 if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc))) in free_irq()
1554 if (WARN_ON(desc->affinity_notify)) in free_irq()
1555 desc->affinity_notify = NULL; in free_irq()
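
Sketch of a teardown that satisfies the WARNs in __free_irq() and free_irq() above: drop the notifier and the hint first, then free with the same dev_id cookie used at request time.

    #include <linux/interrupt.h>

    struct foo_dev {
            int irq;
    };

    static void foo_detach(struct foo_dev *fd)
    {
            irq_set_affinity_notifier(fd->irq, NULL);
            irq_set_affinity_hint(fd->irq, NULL);
            free_irq(fd->irq, fd);  /* also waits out running handlers */
    }
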
1609 struct irq_desc *desc; in request_threaded_irq() local
1626 desc = irq_to_desc(irq); in request_threaded_irq()
1627 if (!desc) in request_threaded_irq()
1630 if (!irq_settings_can_request(desc) || in request_threaded_irq()
1631 WARN_ON(irq_settings_is_per_cpu_devid(desc))) in request_threaded_irq()
1650 chip_bus_lock(desc); in request_threaded_irq()
1651 retval = __setup_irq(irq, desc, action); in request_threaded_irq()
1652 chip_bus_sync_unlock(desc); in request_threaded_irq()
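
Sketch of the registration feeding __setup_irq() here: with a NULL primary handler the core supplies a default one, and IRQF_ONESHOT is then mandatory so a level-triggered line stays masked until the thread has run.

    #include <linux/interrupt.h>

    static irqreturn_t foo_thread_only(int irq, void *dev_id)
    {
            return IRQ_HANDLED;     /* all work done in thread context */
    }

    static int foo_request(unsigned int irq, void *cookie)
    {
            return request_threaded_irq(irq, NULL, foo_thread_only,
                                        IRQF_ONESHOT, "foo", cookie);
    }
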
1702 struct irq_desc *desc = irq_to_desc(irq); in request_any_context_irq() local
1705 if (!desc) in request_any_context_irq()
1708 if (irq_settings_is_nested_thread(desc)) { in request_any_context_irq()
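
Sketch: request_any_context_irq() consults irq_settings_is_nested_thread() as above and tells the caller, via a positive return value, which context it chose.

    #include <linux/interrupt.h>

    static irqreturn_t foo_handler(int irq, void *dev_id)
    {
            return IRQ_HANDLED;
    }

    static int foo_request_any(unsigned int irq, void *cookie)
    {
            int ret = request_any_context_irq(irq, foo_handler, 0,
                                              "foo", cookie);

            if (ret < 0)
                    return ret;
            /* ret is IRQC_IS_HARDIRQ or IRQC_IS_NESTED */
            return 0;
    }
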
1723 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU); in enable_percpu_irq() local
1725 if (!desc) in enable_percpu_irq()
1732 ret = __irq_set_trigger(desc, type); in enable_percpu_irq()
1740 irq_percpu_enable(desc, cpu); in enable_percpu_irq()
1742 irq_put_desc_unlock(desc, flags); in enable_percpu_irq()
1750 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU); in disable_percpu_irq() local
1752 if (!desc) in disable_percpu_irq()
1755 irq_percpu_disable(desc, cpu); in disable_percpu_irq()
1756 irq_put_desc_unlock(desc, flags); in disable_percpu_irq()
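
Sketch: these two act only on the calling CPU's instance of a per-CPU interrupt, so drivers invoke them on each CPU in turn, e.g. from CPU hotplug callbacks (FOO_PPI_IRQ and the callback signatures here are assumptions for illustration).

    #include <linux/interrupt.h>
    #include <linux/irq.h>

    #define FOO_PPI_IRQ 16  /* hypothetical per-CPU interrupt number */

    static int foo_cpu_starting(unsigned int cpu)
    {
            /* runs on the CPU that is coming up */
            enable_percpu_irq(FOO_PPI_IRQ, IRQ_TYPE_NONE);
            return 0;
    }

    static int foo_cpu_dying(unsigned int cpu)
    {
            disable_percpu_irq(FOO_PPI_IRQ);
            return 0;
    }
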
1765 struct irq_desc *desc = irq_to_desc(irq); in __free_percpu_irq() local
1771 if (!desc) in __free_percpu_irq()
1774 raw_spin_lock_irqsave(&desc->lock, flags); in __free_percpu_irq()
1776 action = desc->action; in __free_percpu_irq()
1782 if (!cpumask_empty(desc->percpu_enabled)) { in __free_percpu_irq()
1784 irq, cpumask_first(desc->percpu_enabled)); in __free_percpu_irq()
1789 desc->action = NULL; in __free_percpu_irq()
1791 raw_spin_unlock_irqrestore(&desc->lock, flags); in __free_percpu_irq()
1795 module_put(desc->owner); in __free_percpu_irq()
1799 raw_spin_unlock_irqrestore(&desc->lock, flags); in __free_percpu_irq()
1812 struct irq_desc *desc = irq_to_desc(irq); in remove_percpu_irq() local
1814 if (desc && irq_settings_is_per_cpu_devid(desc)) in remove_percpu_irq()
1832 struct irq_desc *desc = irq_to_desc(irq); in free_percpu_irq() local
1834 if (!desc || !irq_settings_is_per_cpu_devid(desc)) in free_percpu_irq()
1837 chip_bus_lock(desc); in free_percpu_irq()
1839 chip_bus_sync_unlock(desc); in free_percpu_irq()
1852 struct irq_desc *desc = irq_to_desc(irq); in setup_percpu_irq() local
1855 if (!desc || !irq_settings_is_per_cpu_devid(desc)) in setup_percpu_irq()
1857 chip_bus_lock(desc); in setup_percpu_irq()
1858 retval = __setup_irq(irq, desc, act); in setup_percpu_irq()
1859 chip_bus_sync_unlock(desc); in setup_percpu_irq()
1884 struct irq_desc *desc; in request_percpu_irq() local
1890 desc = irq_to_desc(irq); in request_percpu_irq()
1891 if (!desc || !irq_settings_can_request(desc) || in request_percpu_irq()
1892 !irq_settings_is_per_cpu_devid(desc)) in request_percpu_irq()
1904 chip_bus_lock(desc); in request_percpu_irq()
1905 retval = __setup_irq(irq, desc, action); in request_percpu_irq()
1906 chip_bus_sync_unlock(desc); in request_percpu_irq()
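
Sketch: for per-CPU interrupts dev_id is a void __percpu pointer and the flow handler hands each CPU its own slot; the line also comes up disabled, so each CPU still has to call enable_percpu_irq(). Hypothetical per-CPU counter:

    #include <linux/interrupt.h>
    #include <linux/percpu.h>

    static DEFINE_PER_CPU(unsigned long, foo_events);

    static irqreturn_t foo_percpu_handler(int irq, void *dev_id)
    {
            unsigned long *events = dev_id; /* this CPU's counter */

            (*events)++;
            return IRQ_HANDLED;
    }

    static int foo_request_percpu(unsigned int irq)
    {
            return request_percpu_irq(irq, foo_percpu_handler, "foo",
                                      &foo_events);
    }
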
1931 struct irq_desc *desc; in irq_get_irqchip_state() local
1937 desc = irq_get_desc_buslock(irq, &flags, 0); in irq_get_irqchip_state()
1938 if (!desc) in irq_get_irqchip_state()
1941 data = irq_desc_get_irq_data(desc); in irq_get_irqchip_state()
1957 irq_put_desc_busunlock(desc, flags); in irq_get_irqchip_state()
1977 struct irq_desc *desc; in irq_set_irqchip_state() local
1983 desc = irq_get_desc_buslock(irq, &flags, 0); in irq_set_irqchip_state()
1984 if (!desc) in irq_set_irqchip_state()
1987 data = irq_desc_get_irq_data(desc); in irq_set_irqchip_state()
2003 irq_put_desc_busunlock(desc, flags); in irq_set_irqchip_state()
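
Sketch: both helpers resolve, under the bus lock taken above, an irq_data whose chip implements the state callback; a common use is peeking at or clearing a pending latch at the interrupt controller.

    #include <linux/interrupt.h>

    static bool foo_irq_pending(unsigned int irq)
    {
            bool pending = false;

            irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING, &pending);
            return pending;
    }

    static int foo_clear_pending(unsigned int irq)
    {
            return irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, false);
    }
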