Lines matching refs:desc: each hit shows the source line number, the line itself, and the enclosing function; "argument" and "local" mark whether desc is a parameter or a local variable at that point.

39 static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) in alloc_masks() argument
41 if (!zalloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node)) in alloc_masks()
45 if (!zalloc_cpumask_var_node(&desc->pending_mask, gfp, node)) { in alloc_masks()
46 free_cpumask_var(desc->irq_data.affinity); in alloc_masks()
53 static void desc_smp_init(struct irq_desc *desc, int node) in desc_smp_init() argument
55 desc->irq_data.node = node; in desc_smp_init()
56 cpumask_copy(desc->irq_data.affinity, irq_default_affinity); in desc_smp_init()
58 cpumask_clear(desc->pending_mask); in desc_smp_init()
62 static inline int desc_node(struct irq_desc *desc) in desc_node() argument
64 return desc->irq_data.node; in desc_node()
69 alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; } in alloc_masks() argument
70 static inline void desc_smp_init(struct irq_desc *desc, int node) { } in desc_smp_init() argument
71 static inline int desc_node(struct irq_desc *desc) { return 0; } in desc_node() argument
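
The three SMP helpers above (lines 39-64) have empty UP counterparts (lines 69-71), so callers such as alloc_desc() need no #ifdef CONFIG_SMP of their own. Reassembling alloc_masks() from the matched lines, with the preprocessor guards filled in as an assumption (they contain no 'desc' and so are not shown above):

#ifdef CONFIG_SMP
static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node)
{
	/* The affinity mask is always needed on SMP. */
	if (!zalloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node))
		return -ENOMEM;

#ifdef CONFIG_GENERIC_PENDING_IRQ
	/* pending_mask exists only when affinity changes can be deferred. */
	if (!zalloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
		free_cpumask_var(desc->irq_data.affinity);
		return -ENOMEM;
	}
#endif
	return 0;
}
#else
static inline int
alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; }
#endif
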
74 static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node, in desc_set_defaults() argument
79 desc->irq_data.irq = irq; in desc_set_defaults()
80 desc->irq_data.chip = &no_irq_chip; in desc_set_defaults()
81 desc->irq_data.chip_data = NULL; in desc_set_defaults()
82 desc->irq_data.handler_data = NULL; in desc_set_defaults()
83 desc->irq_data.msi_desc = NULL; in desc_set_defaults()
84 irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS); in desc_set_defaults()
85 irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED); in desc_set_defaults()
86 desc->handle_irq = handle_bad_irq; in desc_set_defaults()
87 desc->depth = 1; in desc_set_defaults()
88 desc->irq_count = 0; in desc_set_defaults()
89 desc->irqs_unhandled = 0; in desc_set_defaults()
90 desc->name = NULL; in desc_set_defaults()
91 desc->owner = owner; in desc_set_defaults()
93 *per_cpu_ptr(desc->kstat_irqs, cpu) = 0; in desc_set_defaults()
94 desc_smp_init(desc, node); in desc_set_defaults()
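
Line 93 above sits inside a per-CPU loop whose header contains no 'desc' and is therefore not matched: desc_set_defaults() zeroes the interrupt statistics for every possible CPU before doing the SMP-specific init. A minimal sketch of that tail:

	int cpu;

	for_each_possible_cpu(cpu)
		*per_cpu_ptr(desc->kstat_irqs, cpu) = 0;
	desc_smp_init(desc, node);
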
107 static void irq_insert_desc(unsigned int irq, struct irq_desc *desc) in irq_insert_desc() argument
109 radix_tree_insert(&irq_desc_tree, irq, desc); in irq_insert_desc()
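
irq_insert_desc() is the write side of the SPARSE_IRQ lookup table. The read side, irq_to_desc(), is a plain radix-tree lookup; in this configuration it amounts to roughly:

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return radix_tree_lookup(&irq_desc_tree, irq);
}
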
124 static void free_masks(struct irq_desc *desc) in free_masks() argument
127 free_cpumask_var(desc->pending_mask); in free_masks()
129 free_cpumask_var(desc->irq_data.affinity); in free_masks()
132 static inline void free_masks(struct irq_desc *desc) { } in free_masks() argument
147 struct irq_desc *desc; in alloc_desc() local
150 desc = kzalloc_node(sizeof(*desc), gfp, node); in alloc_desc()
151 if (!desc) in alloc_desc()
154 desc->kstat_irqs = alloc_percpu(unsigned int); in alloc_desc()
155 if (!desc->kstat_irqs) in alloc_desc()
158 if (alloc_masks(desc, gfp, node)) in alloc_desc()
161 raw_spin_lock_init(&desc->lock); in alloc_desc()
162 lockdep_set_class(&desc->lock, &irq_desc_lock_class); in alloc_desc()
164 desc_set_defaults(irq, desc, node, owner); in alloc_desc()
166 return desc; in alloc_desc()
169 free_percpu(desc->kstat_irqs); in alloc_desc()
171 kfree(desc); in alloc_desc()
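
Lines 147-171 are the pieces of alloc_desc(). Put back together, with the error labels named by assumption, the function is the usual goto-based unwind: every allocation that succeeds before a later failure is released in reverse order.

static struct irq_desc *alloc_desc(int irq, int node, struct module *owner)
{
	struct irq_desc *desc;
	gfp_t gfp = GFP_KERNEL;

	desc = kzalloc_node(sizeof(*desc), gfp, node);
	if (!desc)
		return NULL;

	/* Per-CPU interrupt statistics. */
	desc->kstat_irqs = alloc_percpu(unsigned int);
	if (!desc->kstat_irqs)
		goto err_desc;

	/* Affinity/pending cpumasks; a no-op on UP. */
	if (alloc_masks(desc, gfp, node))
		goto err_kstat;

	raw_spin_lock_init(&desc->lock);
	lockdep_set_class(&desc->lock, &irq_desc_lock_class);

	desc_set_defaults(irq, desc, node, owner);

	return desc;

err_kstat:
	free_percpu(desc->kstat_irqs);
err_desc:
	kfree(desc);
	return NULL;
}
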
177 struct irq_desc *desc = irq_to_desc(irq); in free_desc() local
179 unregister_irq_proc(irq, desc); in free_desc()
191 free_masks(desc); in free_desc()
192 free_percpu(desc->kstat_irqs); in free_desc()
193 kfree(desc); in free_desc()
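
In the SPARSE_IRQ case free_desc() must also drop the descriptor from the radix tree before freeing it; those lines contain no 'desc' and are not matched. The removal boils down to a radix_tree_delete() keyed on the irq number (sketch, helper name assumed):

static void delete_irq_desc(unsigned int irq)
{
	radix_tree_delete(&irq_desc_tree, irq);
}
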
199 struct irq_desc *desc; in alloc_descs() local
203 desc = alloc_desc(start + i, node, owner); in alloc_descs()
204 if (!desc) in alloc_descs()
207 irq_insert_desc(start + i, desc); in alloc_descs()
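
alloc_descs() (lines 199-207) allocates a contiguous range of descriptors and publishes each one; if any allocation fails, the descriptors already inserted have to be torn down again. A sketch of that rollback, reconstructed around the matched lines (locking and range bookkeeping omitted):

	for (i = 0; i < cnt; i++) {
		desc = alloc_desc(start + i, node, owner);
		if (!desc)
			goto err;
		irq_insert_desc(start + i, desc);
	}
	return start;

err:
	/* Undo the descriptors that were already set up. */
	for (i--; i >= 0; i--)
		free_desc(start + i);
	return -ENOMEM;
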
233 struct irq_desc *desc; in early_irq_init() local
251 desc = alloc_desc(i, node, NULL); in early_irq_init()
253 irq_insert_desc(i, desc); in early_irq_init()
271 struct irq_desc *desc; in early_irq_init() local
277 desc = irq_desc; in early_irq_init()
281 desc[i].kstat_irqs = alloc_percpu(unsigned int); in early_irq_init()
282 alloc_masks(&desc[i], GFP_KERNEL, node); in early_irq_init()
283 raw_spin_lock_init(&desc[i].lock); in early_irq_init()
284 lockdep_set_class(&desc[i].lock, &irq_desc_lock_class); in early_irq_init()
285 desc_set_defaults(i, &desc[i], node, NULL); in early_irq_init()
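
The second early_irq_init() (lines 271-285) is the !CONFIG_SPARSE_IRQ variant: instead of a radix tree it walks a statically sized irq_desc[] array and finishes initializing every entry in place. The backing array is declared along these lines (sketch, initializer details assumed):

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS-1] = {
		.handle_irq	= handle_bad_irq,
		.depth		= 1,
		.lock		= __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
	}
};
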
298 struct irq_desc *desc = irq_to_desc(irq); in free_desc() local
301 raw_spin_lock_irqsave(&desc->lock, flags); in free_desc()
302 desc_set_defaults(irq, desc, desc_node(desc), NULL); in free_desc()
303 raw_spin_unlock_irqrestore(&desc->lock, flags); in free_desc()
312 struct irq_desc *desc = irq_to_desc(start + i); in alloc_descs() local
314 desc->owner = owner; in alloc_descs()
347 struct irq_desc *desc = irq_to_desc(irq); in generic_handle_irq() local
349 if (!desc) in generic_handle_irq()
351 generic_handle_irq_desc(irq, desc); in generic_handle_irq()
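
generic_handle_irq() is the entry point demultiplexing drivers use to feed a looked-up Linux interrupt number into the core, which then dispatches via desc->handle_irq. A hypothetical cascaded handler (all my_* names are illustrative, not from this file) would use it like this:

#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/irq.h>

/* Hypothetical demux driver state; nothing here comes from irqdesc.c. */
struct my_chip {
	unsigned int irq_base;	/* first Linux irq of the child interrupts */
};

static unsigned long my_chip_read_pending(struct my_chip *chip)
{
	return 0;	/* hypothetical: would read the controller's pending register */
}

static irqreturn_t my_demux_handler(int irq, void *data)
{
	struct my_chip *chip = data;
	unsigned long pending = my_chip_read_pending(chip);
	int bit;

	/* Hand each pending child interrupt to the core. */
	for_each_set_bit(bit, &pending, 32)
		generic_handle_irq(chip->irq_base + bit);

	return IRQ_HANDLED;
}
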
542 struct irq_desc *desc = irq_to_desc(irq); in __irq_get_desc_lock() local
544 if (desc) { in __irq_get_desc_lock()
547 !irq_settings_is_per_cpu_devid(desc)) in __irq_get_desc_lock()
551 irq_settings_is_per_cpu_devid(desc)) in __irq_get_desc_lock()
556 chip_bus_lock(desc); in __irq_get_desc_lock()
557 raw_spin_lock_irqsave(&desc->lock, *flags); in __irq_get_desc_lock()
559 return desc; in __irq_get_desc_lock()
562 void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus) in __irq_put_desc_unlock() argument
564 raw_spin_unlock_irqrestore(&desc->lock, flags); in __irq_put_desc_unlock()
566 chip_bus_sync_unlock(desc); in __irq_put_desc_unlock()
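
__irq_get_desc_lock()/__irq_put_desc_unlock() (lines 542-567) are the internal pairing used within kernel/irq: look up the descriptor, reject irqs whose per-cpu-devid setting does not match the caller's expectation, optionally take the chip bus lock, and return with desc->lock held and interrupts disabled. A caller inside the irq core brackets a descriptor update roughly like this (function name and check flag are assumptions):

/* Hypothetical internal helper; not from irqdesc.c. */
static int my_update_desc(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc;

	desc = __irq_get_desc_lock(irq, &flags, false, IRQ_GET_DESC_CHECK_GLOBAL);
	if (!desc)
		return -EINVAL;

	/* ... modify the descriptor while desc->lock is held ... */

	__irq_put_desc_unlock(desc, flags, false);
	return 0;
}
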
571 struct irq_desc *desc = irq_to_desc(irq); in irq_set_percpu_devid() local
573 if (!desc) in irq_set_percpu_devid()
576 if (desc->percpu_enabled) in irq_set_percpu_devid()
579 desc->percpu_enabled = kzalloc(sizeof(*desc->percpu_enabled), GFP_KERNEL); in irq_set_percpu_devid()
581 if (!desc->percpu_enabled) in irq_set_percpu_devid()
604 struct irq_desc *desc = irq_to_desc(irq); in kstat_irqs_cpu() local
606 return desc && desc->kstat_irqs ? in kstat_irqs_cpu()
607 *per_cpu_ptr(desc->kstat_irqs, cpu) : 0; in kstat_irqs_cpu()
620 struct irq_desc *desc = irq_to_desc(irq); in kstat_irqs() local
624 if (!desc || !desc->kstat_irqs) in kstat_irqs()
627 sum += *per_cpu_ptr(desc->kstat_irqs, cpu); in kstat_irqs()
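
kstat_irqs() sums the per-CPU counters that desc_set_defaults() cleared and that the flow handlers increment; the for_each_possible_cpu() loop header is not matched because it does not mention 'desc'. The whole function is roughly:

unsigned int kstat_irqs(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int cpu;
	int sum = 0;

	if (!desc || !desc->kstat_irqs)
		return 0;

	for_each_possible_cpu(cpu)
		sum += *per_cpu_ptr(desc->kstat_irqs, cpu);

	return sum;
}
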