Lines Matching refs:desc (drivers/pci/msi.c)

95 int __weak arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc) in arch_setup_msi_irq() argument
103 err = chip->setup_irq(chip, dev, desc); in arch_setup_msi_irq()
107 irq_set_chip_data(desc->irq, chip); in arch_setup_msi_irq()
223 u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) in __pci_msi_desc_mask_irq() argument
225 u32 mask_bits = desc->masked; in __pci_msi_desc_mask_irq()
227 if (pci_msi_ignore_mask || !desc->msi_attrib.maskbit) in __pci_msi_desc_mask_irq()
232 pci_write_config_dword(desc->dev, desc->mask_pos, mask_bits); in __pci_msi_desc_mask_irq()
237 static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) in msi_mask_irq() argument
239 desc->masked = __pci_msi_desc_mask_irq(desc, mask, flag); in msi_mask_irq()
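The two entries above (lines 223-239) form the plain-MSI masking path: __pci_msi_desc_mask_irq() read-modify-writes the cached desc->masked value and stores the result back through config space at desc->mask_pos, while msi_mask_irq() caches the return value. The update step itself (the lines between 227 and 232) is not part of this listing because it does not reference desc; a minimal stand-alone sketch of that read-modify-write, assuming the usual "clear the selected bits, then OR in the new state" form:

    #include <assert.h>
    #include <stdint.h>

    /* Stand-alone model of the bit update done between reading
     * desc->masked (line 225) and writing it back (line 232):
     * bits selected by 'mask' are cleared, then 'flag' supplies
     * their new values. */
    static uint32_t msi_update_mask_bits(uint32_t masked, uint32_t mask,
                                         uint32_t flag)
    {
        masked &= ~mask;   /* drop the old state of the selected vectors */
        masked |= flag;    /* apply the new per-vector mask bits         */
        return masked;
    }

    int main(void)
    {
        /* mask vector 1 of an initially unmasked 4-vector MSI block */
        assert(msi_update_mask_bits(0x0, 1u << 1, 1u << 1) == 0x2);
        /* unmask it again: same mask, flag = 0 */
        assert(msi_update_mask_bits(0x2, 1u << 1, 0) == 0x0);
        return 0;
    }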
249 u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag) in __pci_msix_desc_mask_irq() argument
251 u32 mask_bits = desc->masked; in __pci_msix_desc_mask_irq()
252 unsigned offset = desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE + in __pci_msix_desc_mask_irq()
261 writel(mask_bits, desc->mask_base + offset); in __pci_msix_desc_mask_irq()
266 static void msix_mask_irq(struct msi_desc *desc, u32 flag) in msix_mask_irq() argument
268 desc->masked = __pci_msix_desc_mask_irq(desc, flag); in msix_mask_irq()
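For MSI-X (lines 249-268) the per-vector mask bit lives in the Vector Control dword of the device's MSI-X table rather than in config space: line 252 computes the byte offset of that dword for the entry, and line 261 writes the updated bits with writel(). A small stand-alone model of the offset arithmetic, assuming the standard 16-byte table entry with Vector Control at byte offset 12 and the mask in bit 0 (the values behind the kernel's PCI_MSIX_ENTRY_SIZE, PCI_MSIX_ENTRY_VECTOR_CTRL and PCI_MSIX_ENTRY_CTRL_MASKBIT macros):

    #include <assert.h>
    #include <stdint.h>

    /* Assumed MSI-X table layout: 16-byte entries, Vector Control at
     * byte 12, mask bit in bit 0 of that dword. */
    #define MSIX_ENTRY_SIZE        16u
    #define MSIX_ENTRY_VECTOR_CTRL 12u
    #define MSIX_CTRL_MASKBIT      1u

    /* Byte offset of entry_nr's Vector Control dword inside the table,
     * mirroring the arithmetic shown at line 252. */
    static uint32_t msix_vector_ctrl_offset(unsigned entry_nr)
    {
        return entry_nr * MSIX_ENTRY_SIZE + MSIX_ENTRY_VECTOR_CTRL;
    }

    int main(void)
    {
        assert(msix_vector_ctrl_offset(0) == 12);
        assert(msix_vector_ctrl_offset(3) == 60);

        /* Masking sets bit 0 of Vector Control; unmasking clears it. */
        uint32_t ctrl = 0;
        ctrl |= MSIX_CTRL_MASKBIT;   /* mask   */
        ctrl &= ~MSIX_CTRL_MASKBIT;  /* unmask */
        assert(ctrl == 0);
        return 0;
    }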
273 struct msi_desc *desc = irq_data_get_msi(data); in msi_set_mask_bit() local
275 if (desc->msi_attrib.is_msix) { in msi_set_mask_bit()
276 msix_mask_irq(desc, flag); in msi_set_mask_bit()
277 readl(desc->mask_base); /* Flush write to device */ in msi_set_mask_bit()
279 unsigned offset = data->irq - desc->irq; in msi_set_mask_bit()
280 msi_mask_irq(desc, 1 << offset, flag << offset); in msi_set_mask_bit()
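msi_set_mask_bit() (lines 273-280) picks between the two paths: MSI-X masks its single entry and flushes the posted write with readl(), while multi-vector MSI uses data->irq - desc->irq as the vector index, since the Linux IRQs of one MSI block are consecutive, and shifts both the per-vector mask and the flag by that offset. A sketch of that argument construction for the multi-MSI branch (the helper name below is made up for illustration):

    #include <assert.h>
    #include <stdint.h>

    /* Model of line 279-280: consecutive Linux IRQ numbers map to
     * consecutive vectors of one msi_desc, so the vector index is
     * simply irq - desc_base_irq. */
    static void multi_msi_mask_args(unsigned irq, unsigned desc_base_irq,
                                    uint32_t flag,
                                    uint32_t *mask, uint32_t *bits)
    {
        unsigned offset = irq - desc_base_irq;  /* which vector of the block */
        *mask = 1u << offset;                   /* touch only that vector    */
        *bits = flag << offset;                 /* its new mask state        */
    }

    int main(void)
    {
        uint32_t mask, bits;
        /* Mask the third vector (irq 34) of a block whose first irq is 32. */
        multi_msi_mask_args(34, 32, 1, &mask, &bits);
        assert(mask == 0x4 && bits == 0x4);
        return 0;
    }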
428 struct msi_desc *desc = kzalloc(sizeof(*desc), GFP_KERNEL); in alloc_msi_entry() local
429 if (!desc) in alloc_msi_entry()
432 INIT_LIST_HEAD(&desc->list); in alloc_msi_entry()
433 desc->dev = dev; in alloc_msi_entry()
435 return desc; in alloc_msi_entry()
913 struct msi_desc *desc; in pci_msi_shutdown() local
920 desc = list_first_entry(&dev->msi_list, struct msi_desc, list); in pci_msi_shutdown()
927 mask = msi_mask(desc->msi_attrib.multi_cap); in pci_msi_shutdown()
929 __pci_msi_desc_mask_irq(desc, mask, ~mask); in pci_msi_shutdown()
932 dev->irq = desc->msi_attrib.default_irq; in pci_msi_shutdown()
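pci_msi_shutdown() (lines 913-932) works on the first msi_desc of the device: line 927 builds the bitmask covering all implemented vectors from the multi-message capability, line 929 applies it across the block via __pci_msi_desc_mask_irq(), and line 932 restores dev->irq to the pre-MSI default. The body of msi_mask() is not in this listing; the model below assumes the mainline behaviour of 2^multi_cap vectors, saturating at 32:

    #include <assert.h>
    #include <stdint.h>

    /* Assumed stand-alone model of msi_mask(): multi_cap is the log2
     * of the number of supported vectors, so the mask of implemented
     * vectors is (1 << (1 << multi_cap)) - 1, capped at 32 bits. */
    static uint32_t msi_mask_model(unsigned multi_cap)
    {
        if (multi_cap >= 5)              /* 2^5 = 32 vectors: all bits */
            return 0xffffffffu;
        return (1u << (1u << multi_cap)) - 1u;
    }

    int main(void)
    {
        assert(msi_mask_model(0) == 0x01);        /* 1 vector  */
        assert(msi_mask_model(2) == 0x0f);        /* 4 vectors */
        assert(msi_mask_model(5) == 0xffffffffu); /* 32 vectors */
        return 0;
    }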
1181 struct msi_desc *desc = irq_data->msi_desc; in pci_msi_domain_write_msg() local
1187 if (desc->irq == irq_data->irq) in pci_msi_domain_write_msg()
1188 __pci_write_msi_msg(desc, msg); in pci_msi_domain_write_msg()
1199 struct msi_desc *desc) in pci_msi_domain_calc_hwirq() argument
1201 return (irq_hw_number_t)desc->msi_attrib.entry_nr | in pci_msi_domain_calc_hwirq()
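pci_msi_domain_calc_hwirq() (lines 1199-1201) composes the hardware IRQ number used by the MSI irqdomain; only the first term, desc->msi_attrib.entry_nr, is visible here, the continuation being cut off by the listing. The sketch below reconstructs the composition with the entry number in the low bits and the requester ID (bus/devfn, as PCI_DEVID() builds it) ORed in above, plus the PCI domain number at a higher shift; the exact shifts follow the mainline layout and should be read as an assumption, not something this listing shows:

    #include <assert.h>
    #include <stdint.h>

    /* Assumed composition of the hwirq value started at line 1201:
     * entry number in the low bits, requester ID (bus << 8 | devfn,
     * i.e. PCI_DEVID()) above it, PCI domain number higher still. */
    static uint64_t calc_hwirq_model(unsigned entry_nr, unsigned bus,
                                     unsigned devfn, unsigned domain)
    {
        uint64_t devid = ((uint64_t)bus << 8) | devfn;   /* PCI_DEVID() */
        return (uint64_t)entry_nr | (devid << 11) | ((uint64_t)domain << 27);
    }

    int main(void)
    {
        /* entry 2 of function 0 on bus 3, domain 0 */
        assert(calc_hwirq_model(2, 3, 0, 0) == (2u | (0x300u << 11)));
        return 0;
    }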
1206 static inline bool pci_msi_desc_is_multi_msi(struct msi_desc *desc) in pci_msi_desc_is_multi_msi() argument
1208 return !desc->msi_attrib.is_msix && desc->nvec_used > 1; in pci_msi_desc_is_multi_msi()
1225 struct msi_desc *desc = first_pci_msi_entry(to_pci_dev(dev)); in pci_msi_domain_check_cap() local
1228 if (pci_msi_desc_is_multi_msi(desc) && in pci_msi_domain_check_cap()
1231 else if (desc->msi_attrib.is_msix && !(info->flags & MSI_FLAG_PCI_MSIX)) in pci_msi_domain_check_cap()
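pci_msi_domain_check_cap() (lines 1225-1231) rejects requests the irqdomain cannot honour: multi-vector MSI when the domain lacks multi-MSI support, and MSI-X when MSI_FLAG_PCI_MSIX is not set in the domain info flags. A minimal model of that decision; the flag bits and result codes below are illustrative stand-ins, not the kernel's constants:

    #include <assert.h>
    #include <stdbool.h>

    /* Illustrative result codes; the kernel returns integer error
     * codes here, which this listing does not show. */
    enum cap_check { CAP_OK, CAP_RETRY_FEWER_VECTORS, CAP_UNSUPPORTED };

    /* Hypothetical flag bits standing in for MSI_FLAG_MULTI_PCI_MSI
     * and MSI_FLAG_PCI_MSIX in the domain's msi_domain_info::flags. */
    #define FLAG_MULTI_PCI_MSI (1u << 0)
    #define FLAG_PCI_MSIX      (1u << 1)

    static enum cap_check check_cap_model(bool is_msix, unsigned nvec_used,
                                          unsigned domain_flags)
    {
        /* mirrors pci_msi_desc_is_multi_msi(): plain MSI, >1 vectors */
        bool multi_msi = !is_msix && nvec_used > 1;

        if (multi_msi && !(domain_flags & FLAG_MULTI_PCI_MSI))
            return CAP_RETRY_FEWER_VECTORS;
        if (is_msix && !(domain_flags & FLAG_PCI_MSIX))
            return CAP_UNSUPPORTED;
        return CAP_OK;
    }

    int main(void)
    {
        assert(check_cap_model(false, 4, FLAG_PCI_MSIX) == CAP_RETRY_FEWER_VECTORS);
        assert(check_cap_model(true, 1, 0) == CAP_UNSUPPORTED);
        assert(check_cap_model(false, 1, 0) == CAP_OK);
        return 0;
    }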
1238 struct msi_desc *desc, int error) in pci_msi_domain_handle_error() argument
1241 if (pci_msi_desc_is_multi_msi(desc) && error == -ENOSPC) in pci_msi_domain_handle_error()
1249 struct msi_desc *desc) in pci_msi_domain_set_desc() argument
1251 arg->desc = desc; in pci_msi_domain_set_desc()
1252 arg->hwirq = pci_msi_domain_calc_hwirq(msi_desc_to_pci_dev(desc), in pci_msi_domain_set_desc()
1253 desc); in pci_msi_domain_set_desc()